Update boot image and system server profiles [M5C5P80S0] am: e496816c88
Original change: https://googleplex-android-review.googlesource.com/c/platform/art/+/18576728
Change-Id: I39cf72d67d8204961060671cfb33e4efea5f8dae
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/Android.mk b/Android.mk
index 9257c22..15c7989 100644
--- a/Android.mk
+++ b/Android.mk
@@ -882,7 +882,7 @@
define create_public_sdk_dex
public_sdk_$(1)_stub := $$(call get_public_sdk_stub_dex,$(1))
$$(public_sdk_$(1)_stub): PRIVATE_MIN_SDK_VERSION := $(1)
-$$(public_sdk_$(1)_stub): $$(call resolve-prebuilt-sdk-jar-path,$(1)) $$(DX) $$(ZIP2ZIP)
+$$(public_sdk_$(1)_stub): $$(call resolve-prebuilt-sdk-jar-path,$(1)) $$(D8) $$(ZIP2ZIP)
$$(transform-classes.jar-to-dex)
$$(call declare-1p-target,$$(public_sdk_$(1)_stub),art)
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 300526f..91a19d2 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -401,6 +401,12 @@
"name": "art-run-test-2042-checker-dce-always-throw[com.google.android.art.apex]"
},
{
+ "name": "art-run-test-2042-reference-processing[com.google.android.art.apex]"
+ },
+ {
+ "name": "art-run-test-2043-reference-pauses[com.google.android.art.apex]"
+ },
+ {
"name": "art-run-test-2231-checker-heap-poisoning[com.google.android.art.apex]"
},
{
@@ -1699,6 +1705,12 @@
"name": "art-run-test-2042-checker-dce-always-throw"
},
{
+ "name": "art-run-test-2042-reference-processing"
+ },
+ {
+ "name": "art-run-test-2043-reference-pauses"
+ },
+ {
"name": "art-run-test-2231-checker-heap-poisoning"
},
{
@@ -2586,5 +2598,1294 @@
{
"name": "libnativeloader_test"
}
+ ],
+ "hwasan-presubmit": [
+ {
+ "name": "ArtServiceTests"
+ },
+ {
+ "name": "ComposHostTestCases"
+ },
+ {
+ "name": "art_standalone_dexpreopt_tests"
+ },
+ {
+ "name": "art-run-test-001-HelloWorld"
+ },
+ {
+ "name": "art-run-test-001-Main"
+ },
+ {
+ "name": "art-run-test-002-sleep"
+ },
+ {
+ "name": "art-run-test-004-InterfaceTest"
+ },
+ {
+ "name": "art-run-test-004-NativeAllocations"
+ },
+ {
+ "name": "art-run-test-004-checker-UnsafeTest18"
+ },
+ {
+ "name": "art-run-test-006-args"
+ },
+ {
+ "name": "art-run-test-007-count10"
+ },
+ {
+ "name": "art-run-test-009-instanceof"
+ },
+ {
+ "name": "art-run-test-010-instance"
+ },
+ {
+ "name": "art-run-test-011-array-copy"
+ },
+ {
+ "name": "art-run-test-012-math"
+ },
+ {
+ "name": "art-run-test-013-math2"
+ },
+ {
+ "name": "art-run-test-014-math3"
+ },
+ {
+ "name": "art-run-test-015-switch"
+ },
+ {
+ "name": "art-run-test-016-intern"
+ },
+ {
+ "name": "art-run-test-017-float"
+ },
+ {
+ "name": "art-run-test-018-stack-overflow"
+ },
+ {
+ "name": "art-run-test-019-wrong-array-type"
+ },
+ {
+ "name": "art-run-test-020-string"
+ },
+ {
+ "name": "art-run-test-021-string2"
+ },
+ {
+ "name": "art-run-test-022-interface"
+ },
+ {
+ "name": "art-run-test-025-access-controller"
+ },
+ {
+ "name": "art-run-test-026-access"
+ },
+ {
+ "name": "art-run-test-027-arithmetic"
+ },
+ {
+ "name": "art-run-test-028-array-write"
+ },
+ {
+ "name": "art-run-test-029-assert"
+ },
+ {
+ "name": "art-run-test-033-class-init-deadlock"
+ },
+ {
+ "name": "art-run-test-035-enum"
+ },
+ {
+ "name": "art-run-test-036-finalizer"
+ },
+ {
+ "name": "art-run-test-037-inherit"
+ },
+ {
+ "name": "art-run-test-039-join-main"
+ },
+ {
+ "name": "art-run-test-040-miranda"
+ },
+ {
+ "name": "art-run-test-041-narrowing"
+ },
+ {
+ "name": "art-run-test-043-privates"
+ },
+ {
+ "name": "art-run-test-045-reflect-array"
+ },
+ {
+ "name": "art-run-test-046-reflect"
+ },
+ {
+ "name": "art-run-test-047-returns"
+ },
+ {
+ "name": "art-run-test-048-reflect-v8"
+ },
+ {
+ "name": "art-run-test-049-show-object"
+ },
+ {
+ "name": "art-run-test-050-sync-test"
+ },
+ {
+ "name": "art-run-test-052-verifier-fun"
+ },
+ {
+ "name": "art-run-test-053-wait-some"
+ },
+ {
+ "name": "art-run-test-055-enum-performance"
+ },
+ {
+ "name": "art-run-test-058-enum-order"
+ },
+ {
+ "name": "art-run-test-059-finalizer-throw"
+ },
+ {
+ "name": "art-run-test-061-out-of-memory"
+ },
+ {
+ "name": "art-run-test-062-character-encodings"
+ },
+ {
+ "name": "art-run-test-063-process-manager"
+ },
+ {
+ "name": "art-run-test-067-preemptive-unpark"
+ },
+ {
+ "name": "art-run-test-070-nio-buffer"
+ },
+ {
+ "name": "art-run-test-072-precise-gc"
+ },
+ {
+ "name": "art-run-test-072-reachability-fence"
+ },
+ {
+ "name": "art-run-test-074-gc-thrash"
+ },
+ {
+ "name": "art-run-test-076-boolean-put"
+ },
+ {
+ "name": "art-run-test-078-polymorphic-virtual"
+ },
+ {
+ "name": "art-run-test-079-phantom"
+ },
+ {
+ "name": "art-run-test-080-oom-fragmentation"
+ },
+ {
+ "name": "art-run-test-080-oom-throw"
+ },
+ {
+ "name": "art-run-test-080-oom-throw-with-finalizer"
+ },
+ {
+ "name": "art-run-test-081-hot-exceptions"
+ },
+ {
+ "name": "art-run-test-082-inline-execute"
+ },
+ {
+ "name": "art-run-test-083-compiler-regressions"
+ },
+ {
+ "name": "art-run-test-084-class-init"
+ },
+ {
+ "name": "art-run-test-090-loop-formation"
+ },
+ {
+ "name": "art-run-test-092-locale"
+ },
+ {
+ "name": "art-run-test-093-serialization"
+ },
+ {
+ "name": "art-run-test-094-pattern"
+ },
+ {
+ "name": "art-run-test-095-switch-MAX_INT"
+ },
+ {
+ "name": "art-run-test-096-array-copy-concurrent-gc"
+ },
+ {
+ "name": "art-run-test-099-vmdebug"
+ },
+ {
+ "name": "art-run-test-100-reflect2"
+ },
+ {
+ "name": "art-run-test-1000-non-moving-space-stress"
+ },
+ {
+ "name": "art-run-test-1004-checker-volatile-ref-load"
+ },
+ {
+ "name": "art-run-test-101-fibonacci"
+ },
+ {
+ "name": "art-run-test-102-concurrent-gc"
+ },
+ {
+ "name": "art-run-test-103-string-append"
+ },
+ {
+ "name": "art-run-test-104-growth-limit"
+ },
+ {
+ "name": "art-run-test-105-invoke"
+ },
+ {
+ "name": "art-run-test-106-exceptions2"
+ },
+ {
+ "name": "art-run-test-107-int-math2"
+ },
+ {
+ "name": "art-run-test-108-check-cast"
+ },
+ {
+ "name": "art-run-test-109-suspend-check"
+ },
+ {
+ "name": "art-run-test-110-field-access"
+ },
+ {
+ "name": "art-run-test-112-double-math"
+ },
+ {
+ "name": "art-run-test-114-ParallelGC"
+ },
+ {
+ "name": "art-run-test-120-hashcode"
+ },
+ {
+ "name": "art-run-test-121-simple-suspend-check"
+ },
+ {
+ "name": "art-run-test-122-npe"
+ },
+ {
+ "name": "art-run-test-123-compiler-regressions-mt"
+ },
+ {
+ "name": "art-run-test-123-inline-execute2"
+ },
+ {
+ "name": "art-run-test-125-gc-and-classloading"
+ },
+ {
+ "name": "art-run-test-128-reg-spill-on-implicit-nullcheck"
+ },
+ {
+ "name": "art-run-test-129-ThreadGetId"
+ },
+ {
+ "name": "art-run-test-132-daemon-locks-shutdown"
+ },
+ {
+ "name": "art-run-test-133-static-invoke-super"
+ },
+ {
+ "name": "art-run-test-1338-gc-no-los"
+ },
+ {
+ "name": "art-run-test-140-dce-regression"
+ },
+ {
+ "name": "art-run-test-140-field-packing"
+ },
+ {
+ "name": "art-run-test-143-string-value"
+ },
+ {
+ "name": "art-run-test-144-static-field-sigquit"
+ },
+ {
+ "name": "art-run-test-145-alloc-tracking-stress"
+ },
+ {
+ "name": "art-run-test-151-OpenFileLimit"
+ },
+ {
+ "name": "art-run-test-152-dead-large-object"
+ },
+ {
+ "name": "art-run-test-153-reference-stress"
+ },
+ {
+ "name": "art-run-test-156-register-dex-file-multi-loader"
+ },
+ {
+ "name": "art-run-test-159-app-image-fields"
+ },
+ {
+ "name": "art-run-test-160-read-barrier-stress"
+ },
+ {
+ "name": "art-run-test-163-app-image-methods"
+ },
+ {
+ "name": "art-run-test-165-lock-owner-proxy"
+ },
+ {
+ "name": "art-run-test-168-vmstack-annotated"
+ },
+ {
+ "name": "art-run-test-170-interface-init"
+ },
+ {
+ "name": "art-run-test-174-escaping-instance-of-bad-class"
+ },
+ {
+ "name": "art-run-test-175-alloc-big-bignums"
+ },
+ {
+ "name": "art-run-test-176-app-image-string"
+ },
+ {
+ "name": "art-run-test-1960-checker-bounds-codegen"
+ },
+ {
+ "name": "art-run-test-1961-checker-loop-vectorizer"
+ },
+ {
+ "name": "art-run-test-201-built-in-except-detail-messages"
+ },
+ {
+ "name": "art-run-test-2019-constantcalculationsinking"
+ },
+ {
+ "name": "art-run-test-202-thread-oome"
+ },
+ {
+ "name": "art-run-test-2020-InvokeVirtual-Inlining"
+ },
+ {
+ "name": "art-run-test-2021-InvokeStatic-Inlining"
+ },
+ {
+ "name": "art-run-test-2022-Invariantloops"
+ },
+ {
+ "name": "art-run-test-2023-InvariantLoops_typecast"
+ },
+ {
+ "name": "art-run-test-2024-InvariantNegativeLoop"
+ },
+ {
+ "name": "art-run-test-2025-ChangedArrayValue"
+ },
+ {
+ "name": "art-run-test-2026-DifferentMemoryLSCouples"
+ },
+ {
+ "name": "art-run-test-2027-TwiceTheSameMemoryCouple"
+ },
+ {
+ "name": "art-run-test-2028-MultiBackward"
+ },
+ {
+ "name": "art-run-test-2029-contended-monitors"
+ },
+ {
+ "name": "art-run-test-2030-long-running-child"
+ },
+ {
+ "name": "art-run-test-2042-checker-dce-always-throw"
+ },
+ {
+ "name": "art-run-test-2231-checker-heap-poisoning"
+ },
+ {
+ "name": "art-run-test-2232-write-metrics-to-log"
+ },
+ {
+ "name": "art-run-test-2234-checker-remove-entry-suspendcheck"
+ },
+ {
+ "name": "art-run-test-2236-JdkUnsafeGetLong-regression"
+ },
+ {
+ "name": "art-run-test-300-package-override"
+ },
+ {
+ "name": "art-run-test-301-abstract-protected"
+ },
+ {
+ "name": "art-run-test-302-float-conversion"
+ },
+ {
+ "name": "art-run-test-304-method-tracing"
+ },
+ {
+ "name": "art-run-test-401-optimizing-compiler"
+ },
+ {
+ "name": "art-run-test-402-optimizing-control-flow"
+ },
+ {
+ "name": "art-run-test-403-optimizing-long"
+ },
+ {
+ "name": "art-run-test-404-optimizing-allocator"
+ },
+ {
+ "name": "art-run-test-405-optimizing-long-allocator"
+ },
+ {
+ "name": "art-run-test-406-fields"
+ },
+ {
+ "name": "art-run-test-407-arrays"
+ },
+ {
+ "name": "art-run-test-408-move-bug"
+ },
+ {
+ "name": "art-run-test-409-materialized-condition"
+ },
+ {
+ "name": "art-run-test-410-floats"
+ },
+ {
+ "name": "art-run-test-411-checker-hdiv-hrem-const"
+ },
+ {
+ "name": "art-run-test-411-checker-hdiv-hrem-pow2"
+ },
+ {
+ "name": "art-run-test-411-checker-instruct-simplifier-hrem"
+ },
+ {
+ "name": "art-run-test-411-optimizing-arith"
+ },
+ {
+ "name": "art-run-test-413-regalloc-regression"
+ },
+ {
+ "name": "art-run-test-414-static-fields"
+ },
+ {
+ "name": "art-run-test-418-const-string"
+ },
+ {
+ "name": "art-run-test-419-long-parameter"
+ },
+ {
+ "name": "art-run-test-420-const-class"
+ },
+ {
+ "name": "art-run-test-421-exceptions"
+ },
+ {
+ "name": "art-run-test-421-large-frame"
+ },
+ {
+ "name": "art-run-test-422-instanceof"
+ },
+ {
+ "name": "art-run-test-422-type-conversion"
+ },
+ {
+ "name": "art-run-test-423-invoke-interface"
+ },
+ {
+ "name": "art-run-test-424-checkcast"
+ },
+ {
+ "name": "art-run-test-426-monitor"
+ },
+ {
+ "name": "art-run-test-427-bitwise"
+ },
+ {
+ "name": "art-run-test-427-bounds"
+ },
+ {
+ "name": "art-run-test-429-ssa-builder"
+ },
+ {
+ "name": "art-run-test-430-live-register-slow-path"
+ },
+ {
+ "name": "art-run-test-433-gvn"
+ },
+ {
+ "name": "art-run-test-434-shifter-operand"
+ },
+ {
+ "name": "art-run-test-435-try-finally-without-catch"
+ },
+ {
+ "name": "art-run-test-436-rem-float"
+ },
+ {
+ "name": "art-run-test-436-shift-constant"
+ },
+ {
+ "name": "art-run-test-437-inline"
+ },
+ {
+ "name": "art-run-test-438-volatile"
+ },
+ {
+ "name": "art-run-test-439-npe"
+ },
+ {
+ "name": "art-run-test-439-swap-double"
+ },
+ {
+ "name": "art-run-test-440-stmp"
+ },
+ {
+ "name": "art-run-test-441-checker-inliner"
+ },
+ {
+ "name": "art-run-test-443-not-bool-inline"
+ },
+ {
+ "name": "art-run-test-444-checker-nce"
+ },
+ {
+ "name": "art-run-test-445-checker-licm"
+ },
+ {
+ "name": "art-run-test-446-checker-inliner2"
+ },
+ {
+ "name": "art-run-test-447-checker-inliner3"
+ },
+ {
+ "name": "art-run-test-449-checker-bce-rem"
+ },
+ {
+ "name": "art-run-test-450-checker-types"
+ },
+ {
+ "name": "art-run-test-451-regression-add-float"
+ },
+ {
+ "name": "art-run-test-451-spill-splot"
+ },
+ {
+ "name": "art-run-test-455-checker-gvn"
+ },
+ {
+ "name": "art-run-test-456-baseline-array-set"
+ },
+ {
+ "name": "art-run-test-458-long-to-fpu"
+ },
+ {
+ "name": "art-run-test-464-checker-inline-sharpen-calls"
+ },
+ {
+ "name": "art-run-test-465-checker-clinit-gvn"
+ },
+ {
+ "name": "art-run-test-469-condition-materialization"
+ },
+ {
+ "name": "art-run-test-470-huge-method"
+ },
+ {
+ "name": "art-run-test-471-deopt-environment"
+ },
+ {
+ "name": "art-run-test-472-type-propagation"
+ },
+ {
+ "name": "art-run-test-473-checker-inliner-constants"
+ },
+ {
+ "name": "art-run-test-473-remove-dead-block"
+ },
+ {
+ "name": "art-run-test-474-checker-boolean-input"
+ },
+ {
+ "name": "art-run-test-474-fp-sub-neg"
+ },
+ {
+ "name": "art-run-test-475-simplify-mul-zero"
+ },
+ {
+ "name": "art-run-test-476-checker-ctor-fence-redun-elim"
+ },
+ {
+ "name": "art-run-test-476-checker-ctor-memory-barrier"
+ },
+ {
+ "name": "art-run-test-476-clinit-inline-static-invoke"
+ },
+ {
+ "name": "art-run-test-477-checker-bound-type"
+ },
+ {
+ "name": "art-run-test-477-long-2-float-convers-precision"
+ },
+ {
+ "name": "art-run-test-478-checker-clinit-check-pruning"
+ },
+ {
+ "name": "art-run-test-478-checker-inline-noreturn"
+ },
+ {
+ "name": "art-run-test-478-checker-inliner-nested-loop"
+ },
+ {
+ "name": "art-run-test-479-regression-implicit-null-check"
+ },
+ {
+ "name": "art-run-test-480-checker-dead-blocks"
+ },
+ {
+ "name": "art-run-test-481-regression-phi-cond"
+ },
+ {
+ "name": "art-run-test-482-checker-loop-back-edge-use"
+ },
+ {
+ "name": "art-run-test-483-dce-block"
+ },
+ {
+ "name": "art-run-test-485-checker-dce-switch"
+ },
+ {
+ "name": "art-run-test-486-checker-must-do-null-check"
+ },
+ {
+ "name": "art-run-test-487-checker-inline-calls"
+ },
+ {
+ "name": "art-run-test-488-checker-inline-recursive-calls"
+ },
+ {
+ "name": "art-run-test-489-current-method-regression"
+ },
+ {
+ "name": "art-run-test-490-checker-inline"
+ },
+ {
+ "name": "art-run-test-491-current-method"
+ },
+ {
+ "name": "art-run-test-492-checker-inline-invoke-interface"
+ },
+ {
+ "name": "art-run-test-493-checker-inline-invoke-interface"
+ },
+ {
+ "name": "art-run-test-494-checker-instanceof-tests"
+ },
+ {
+ "name": "art-run-test-495-checker-checkcast-tests"
+ },
+ {
+ "name": "art-run-test-496-checker-inlining-class-loader"
+ },
+ {
+ "name": "art-run-test-499-bce-phi-array-length"
+ },
+ {
+ "name": "art-run-test-500-instanceof"
+ },
+ {
+ "name": "art-run-test-505-simplifier-type-propagation"
+ },
+ {
+ "name": "art-run-test-507-boolean-test"
+ },
+ {
+ "name": "art-run-test-507-referrer"
+ },
+ {
+ "name": "art-run-test-508-checker-disassembly"
+ },
+ {
+ "name": "art-run-test-508-referrer-method"
+ },
+ {
+ "name": "art-run-test-513-array-deopt"
+ },
+ {
+ "name": "art-run-test-514-shifts"
+ },
+ {
+ "name": "art-run-test-519-bound-load-class"
+ },
+ {
+ "name": "art-run-test-521-checker-array-set-null"
+ },
+ {
+ "name": "art-run-test-521-regression-integer-field-set"
+ },
+ {
+ "name": "art-run-test-524-boolean-simplifier-regression"
+ },
+ {
+ "name": "art-run-test-525-checker-arrays-fields1"
+ },
+ {
+ "name": "art-run-test-525-checker-arrays-fields2"
+ },
+ {
+ "name": "art-run-test-526-checker-caller-callee-regs"
+ },
+ {
+ "name": "art-run-test-526-long-regalloc"
+ },
+ {
+ "name": "art-run-test-527-checker-array-access-simd"
+ },
+ {
+ "name": "art-run-test-527-checker-array-access-split"
+ },
+ {
+ "name": "art-run-test-528-long-hint"
+ },
+ {
+ "name": "art-run-test-529-long-split"
+ },
+ {
+ "name": "art-run-test-530-checker-loops-try-catch"
+ },
+ {
+ "name": "art-run-test-530-checker-loops1"
+ },
+ {
+ "name": "art-run-test-530-checker-loops2"
+ },
+ {
+ "name": "art-run-test-530-checker-loops3"
+ },
+ {
+ "name": "art-run-test-530-checker-loops4"
+ },
+ {
+ "name": "art-run-test-530-checker-loops5"
+ },
+ {
+ "name": "art-run-test-530-checker-lse"
+ },
+ {
+ "name": "art-run-test-530-checker-lse-ctor-fences"
+ },
+ {
+ "name": "art-run-test-530-checker-lse-simd"
+ },
+ {
+ "name": "art-run-test-530-checker-lse-try-catch"
+ },
+ {
+ "name": "art-run-test-530-instanceof-checkcast"
+ },
+ {
+ "name": "art-run-test-532-checker-nonnull-arrayset"
+ },
+ {
+ "name": "art-run-test-534-checker-bce-deoptimization"
+ },
+ {
+ "name": "art-run-test-535-deopt-and-inlining"
+ },
+ {
+ "name": "art-run-test-536-checker-intrinsic-optimization"
+ },
+ {
+ "name": "art-run-test-537-checker-arraycopy"
+ },
+ {
+ "name": "art-run-test-537-checker-jump-over-jump"
+ },
+ {
+ "name": "art-run-test-538-checker-embed-constants"
+ },
+ {
+ "name": "art-run-test-540-checker-rtp-bug"
+ },
+ {
+ "name": "art-run-test-542-bitfield-rotates"
+ },
+ {
+ "name": "art-run-test-542-inline-trycatch"
+ },
+ {
+ "name": "art-run-test-542-unresolved-access-check"
+ },
+ {
+ "name": "art-run-test-545-tracing-and-jit"
+ },
+ {
+ "name": "art-run-test-548-checker-inlining-and-dce"
+ },
+ {
+ "name": "art-run-test-549-checker-types-merge"
+ },
+ {
+ "name": "art-run-test-550-checker-multiply-accumulate"
+ },
+ {
+ "name": "art-run-test-550-new-instance-clinit"
+ },
+ {
+ "name": "art-run-test-551-checker-clinit"
+ },
+ {
+ "name": "art-run-test-551-checker-shifter-operand"
+ },
+ {
+ "name": "art-run-test-551-implicit-null-checks"
+ },
+ {
+ "name": "art-run-test-552-checker-x86-avx2-bit-manipulation"
+ },
+ {
+ "name": "art-run-test-554-checker-rtp-checkcast"
+ },
+ {
+ "name": "art-run-test-557-checker-instruct-simplifier-ror"
+ },
+ {
+ "name": "art-run-test-558-switch"
+ },
+ {
+ "name": "art-run-test-559-bce-ssa"
+ },
+ {
+ "name": "art-run-test-559-checker-rtp-ifnotnull"
+ },
+ {
+ "name": "art-run-test-560-packed-switch"
+ },
+ {
+ "name": "art-run-test-561-divrem"
+ },
+ {
+ "name": "art-run-test-561-shared-slowpaths"
+ },
+ {
+ "name": "art-run-test-562-bce-preheader"
+ },
+ {
+ "name": "art-run-test-562-checker-no-intermediate"
+ },
+ {
+ "name": "art-run-test-563-checker-invoke-super"
+ },
+ {
+ "name": "art-run-test-564-checker-bitcount"
+ },
+ {
+ "name": "art-run-test-564-checker-inline-loop"
+ },
+ {
+ "name": "art-run-test-564-checker-negbitwise"
+ },
+ {
+ "name": "art-run-test-565-checker-condition-liveness"
+ },
+ {
+ "name": "art-run-test-566-checker-codegen-select"
+ },
+ {
+ "name": "art-run-test-567-checker-builder-intrinsics"
+ },
+ {
+ "name": "art-run-test-568-checker-onebit"
+ },
+ {
+ "name": "art-run-test-570-checker-select"
+ },
+ {
+ "name": "art-run-test-572-checker-array-get-regression"
+ },
+ {
+ "name": "art-run-test-573-checker-checkcast-regression"
+ },
+ {
+ "name": "art-run-test-576-polymorphic-inlining"
+ },
+ {
+ "name": "art-run-test-577-checker-fp2int"
+ },
+ {
+ "name": "art-run-test-578-bce-visit"
+ },
+ {
+ "name": "art-run-test-578-polymorphic-inlining"
+ },
+ {
+ "name": "art-run-test-579-inline-infinite"
+ },
+ {
+ "name": "art-run-test-580-checker-fp16"
+ },
+ {
+ "name": "art-run-test-580-checker-round"
+ },
+ {
+ "name": "art-run-test-580-checker-string-fact-intrinsics"
+ },
+ {
+ "name": "art-run-test-580-crc32"
+ },
+ {
+ "name": "art-run-test-581-checker-rtp"
+ },
+ {
+ "name": "art-run-test-582-checker-bce-length"
+ },
+ {
+ "name": "art-run-test-583-checker-zero"
+ },
+ {
+ "name": "art-run-test-584-checker-div-bool"
+ },
+ {
+ "name": "art-run-test-589-super-imt"
+ },
+ {
+ "name": "art-run-test-590-checker-arr-set-null-regression"
+ },
+ {
+ "name": "art-run-test-591-checker-regression-dead-loop"
+ },
+ {
+ "name": "art-run-test-593-checker-long-2-float-regression"
+ },
+ {
+ "name": "art-run-test-594-checker-array-alias"
+ },
+ {
+ "name": "art-run-test-594-load-string-regression"
+ },
+ {
+ "name": "art-run-test-603-checker-instanceof"
+ },
+ {
+ "name": "art-run-test-605-new-string-from-bytes"
+ },
+ {
+ "name": "art-run-test-607-daemon-stress"
+ },
+ {
+ "name": "art-run-test-609-checker-inline-interface"
+ },
+ {
+ "name": "art-run-test-609-checker-x86-bounds-check"
+ },
+ {
+ "name": "art-run-test-610-arraycopy"
+ },
+ {
+ "name": "art-run-test-611-checker-simplify-if"
+ },
+ {
+ "name": "art-run-test-614-checker-dump-constant-location"
+ },
+ {
+ "name": "art-run-test-615-checker-arm64-store-zero"
+ },
+ {
+ "name": "art-run-test-617-clinit-oome"
+ },
+ {
+ "name": "art-run-test-618-checker-induction"
+ },
+ {
+ "name": "art-run-test-619-checker-current-method"
+ },
+ {
+ "name": "art-run-test-620-checker-bce-intrinsics"
+ },
+ {
+ "name": "art-run-test-622-checker-bce-regressions"
+ },
+ {
+ "name": "art-run-test-625-checker-licm-regressions"
+ },
+ {
+ "name": "art-run-test-627-checker-unroll"
+ },
+ {
+ "name": "art-run-test-628-vdex"
+ },
+ {
+ "name": "art-run-test-631-checker-get-class"
+ },
+ {
+ "name": "art-run-test-632-checker-char-at-bounds"
+ },
+ {
+ "name": "art-run-test-635-checker-arm64-volatile-load-cc"
+ },
+ {
+ "name": "art-run-test-636-arm64-veneer-pool"
+ },
+ {
+ "name": "art-run-test-637-checker-throw-inline"
+ },
+ {
+ "name": "art-run-test-639-checker-code-sinking"
+ },
+ {
+ "name": "art-run-test-640-checker-boolean-simd"
+ },
+ {
+ "name": "art-run-test-640-checker-integer-valueof"
+ },
+ {
+ "name": "art-run-test-640-checker-simd"
+ },
+ {
+ "name": "art-run-test-641-checker-arraycopy"
+ },
+ {
+ "name": "art-run-test-641-iterations"
+ },
+ {
+ "name": "art-run-test-643-checker-bogus-ic"
+ },
+ {
+ "name": "art-run-test-645-checker-abs-simd"
+ },
+ {
+ "name": "art-run-test-646-checker-arraycopy-large-cst-pos"
+ },
+ {
+ "name": "art-run-test-646-checker-long-const-to-int"
+ },
+ {
+ "name": "art-run-test-646-checker-simd-hadd"
+ },
+ {
+ "name": "art-run-test-650-checker-inline-access-thunks"
+ },
+ {
+ "name": "art-run-test-654-checker-periodic"
+ },
+ {
+ "name": "art-run-test-655-checker-simd-arm-opt"
+ },
+ {
+ "name": "art-run-test-656-checker-simd-opt"
+ },
+ {
+ "name": "art-run-test-657-branches"
+ },
+ {
+ "name": "art-run-test-658-fp-read-barrier"
+ },
+ {
+ "name": "art-run-test-659-unpadded-array"
+ },
+ {
+ "name": "art-run-test-660-checker-sad"
+ },
+ {
+ "name": "art-run-test-660-checker-simd-sad"
+ },
+ {
+ "name": "art-run-test-661-checker-simd-reduc"
+ },
+ {
+ "name": "art-run-test-662-regression-alias"
+ },
+ {
+ "name": "art-run-test-665-checker-simd-zero"
+ },
+ {
+ "name": "art-run-test-666-dex-cache-itf"
+ },
+ {
+ "name": "art-run-test-667-checker-simd-alignment"
+ },
+ {
+ "name": "art-run-test-667-out-of-bounds"
+ },
+ {
+ "name": "art-run-test-669-checker-break"
+ },
+ {
+ "name": "art-run-test-671-npe-field-opts"
+ },
+ {
+ "name": "art-run-test-672-checker-throw-method"
+ },
+ {
+ "name": "art-run-test-673-checker-throw-vmethod"
+ },
+ {
+ "name": "art-run-test-676-proxy-jit-at-first-use"
+ },
+ {
+ "name": "art-run-test-677-fsi2"
+ },
+ {
+ "name": "art-run-test-678-quickening"
+ },
+ {
+ "name": "art-run-test-684-checker-simd-dotprod"
+ },
+ {
+ "name": "art-run-test-684-select-condition"
+ },
+ {
+ "name": "art-run-test-689-multi-catch"
+ },
+ {
+ "name": "art-run-test-694-clinit-jit"
+ },
+ {
+ "name": "art-run-test-695-simplify-throws"
+ },
+ {
+ "name": "art-run-test-696-loop"
+ },
+ {
+ "name": "art-run-test-697-checker-string-append"
+ },
+ {
+ "name": "art-run-test-698-selects"
+ },
+ {
+ "name": "art-run-test-700-LoadArgRegs"
+ },
+ {
+ "name": "art-run-test-703-floating-point-div"
+ },
+ {
+ "name": "art-run-test-704-multiply-accumulate"
+ },
+ {
+ "name": "art-run-test-705-register-conflict"
+ },
+ {
+ "name": "art-run-test-711-checker-type-conversion"
+ },
+ {
+ "name": "art-run-test-713-varhandle-invokers"
+ },
+ {
+ "name": "art-run-test-718-zipfile-finalizer"
+ },
+ {
+ "name": "art-run-test-719-varhandle-concurrency"
+ },
+ {
+ "name": "art-run-test-721-osr"
+ },
+ {
+ "name": "art-run-test-726-array-store"
+ },
+ {
+ "name": "art-run-test-730-checker-inlining-super"
+ },
+ {
+ "name": "art-run-test-731-bounds-check-slow-path"
+ },
+ {
+ "name": "art-run-test-805-TooDeepClassInstanceOf"
+ },
+ {
+ "name": "art-run-test-806-TooWideClassInstanceOf"
+ },
+ {
+ "name": "art-run-test-812-recursive-default"
+ },
+ {
+ "name": "art-run-test-814-large-field-offsets"
+ },
+ {
+ "name": "art-run-test-815-invokeinterface-default"
+ },
+ {
+ "name": "art-run-test-818-clinit-nterp"
+ },
+ {
+ "name": "art-run-test-821-madvise-willneed"
+ },
+ {
+ "name": "art-run-test-828-partial-lse"
+ },
+ {
+ "name": "art-run-test-834-lse"
+ },
+ {
+ "name": "art-run-test-835-b216762268"
+ },
+ {
+ "name": "art-run-test-963-default-range-smali"
+ },
+ {
+ "name": "art_libnativebridge_cts_tests"
+ },
+ {
+ "name": "art_standalone_cmdline_tests"
+ },
+ {
+ "name": "art_standalone_dexdump_tests"
+ },
+ {
+ "name": "art_standalone_dexlist_tests"
+ },
+ {
+ "name": "art_standalone_dexoptanalyzer_tests"
+ },
+ {
+ "name": "art_standalone_libartbase_tests"
+ },
+ {
+ "name": "art_standalone_libartpalette_tests"
+ },
+ {
+ "name": "art_standalone_libartservice_tests"
+ },
+ {
+ "name": "art_standalone_libarttools_tests"
+ },
+ {
+ "name": "art_standalone_libdexfile_support_tests"
+ },
+ {
+ "name": "art_standalone_libdexfile_tests"
+ },
+ {
+ "name": "art_standalone_libprofile_tests"
+ },
+ {
+ "name": "art_standalone_oatdump_tests"
+ },
+ {
+ "name": "art_standalone_odrefresh_tests"
+ },
+ {
+ "name": "art_standalone_profman_tests"
+ },
+ {
+ "name": "art_standalone_runtime_compiler_tests"
+ },
+ {
+ "name": "art_standalone_runtime_tests"
+ },
+ {
+ "name": "art_standalone_sigchain_tests"
+ },
+ {
+ "name": "libnativeloader_test"
+ }
]
}
diff --git a/artd/artd.cc b/artd/artd.cc
index 1dcd2e8..b52fca3 100644
--- a/artd/artd.cc
+++ b/artd/artd.cc
@@ -22,7 +22,7 @@
#include <unistd.h>
#include <utils/Errors.h>
-#include "aidl/android/os/BnArtd.h"
+#include "aidl/com/android/server/art/BnArtd.h"
#include "base/logging.h"
#include "base/macros.h"
#include "tools/tools.h"
@@ -32,7 +32,7 @@
namespace android {
namespace artd {
-class Artd : public aidl::android::os::BnArtd {
+class Artd : public aidl::com::android::server::art::BnArtd {
constexpr static const char* const SERVICE_NAME = "artd";
public:
diff --git a/artd/binder/Android.bp b/artd/binder/Android.bp
index 6acfe4e..ad8474f 100644
--- a/artd/binder/Android.bp
+++ b/artd/binder/Android.bp
@@ -25,7 +25,7 @@
aidl_interface {
name: "artd-aidl",
srcs: [
- "android/os/IArtd.aidl",
+ "com/android/server/art/*.aidl",
],
host_supported: true,
backend: {
diff --git a/artd/binder/android/os/IArtd.aidl b/artd/binder/com/android/server/art/IArtd.aidl
similarity index 95%
rename from artd/binder/android/os/IArtd.aidl
rename to artd/binder/com/android/server/art/IArtd.aidl
index a16764b..58b2aae 100644
--- a/artd/binder/android/os/IArtd.aidl
+++ b/artd/binder/com/android/server/art/IArtd.aidl
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package android.os;
+package com.android.server.art;
/** {@hide} */
interface IArtd {
diff --git a/artd/tests/src/com/android/art/ArtdIntegrationTests.java b/artd/tests/src/com/android/art/ArtdIntegrationTests.java
index 7d40adb..2a32972 100644
--- a/artd/tests/src/com/android/art/ArtdIntegrationTests.java
+++ b/artd/tests/src/com/android/art/ArtdIntegrationTests.java
@@ -16,11 +16,12 @@
package com.android.art;
-import android.os.IArtd;
import android.os.IBinder;
import android.os.RemoteException;
import android.os.ServiceManager;
+import com.android.server.art.IArtd;
+
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index fb071f1..6b9e684 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -59,7 +59,6 @@
// TODO(b/124476339): Clean up the following libraries once "required"
// dependencies work with APEX libraries.
"libart-compiler",
- "libartservice",
"libdt_fd_forward",
"libdt_socket",
"libjdwp",
@@ -267,6 +266,9 @@
native_shared_libs: art_runtime_base_native_shared_libs +
art_runtime_base_native_device_only_shared_libs +
libcore_native_shared_libs,
+ jni_libs: [
+ "libartservice",
+ ],
binaries: [
"artd",
],
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 495398b..4471b93 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -187,14 +187,16 @@
fake_stack.push_back(0);
}
- fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
+ OatQuickMethodHeader* header = OatQuickMethodHeader::FromEntryPoint(
+ method_g_->GetEntryPointFromQuickCompiledCode());
+ fake_stack.push_back(header->ToNativeQuickPc(
method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
- fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
+ fake_stack.push_back(header->ToNativeQuickPc(
method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc
// Create/push fake 16byte stack frame for method f
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 0a1f017..d05324b 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -414,7 +414,7 @@
CHECK(!caller->IsCriticalNative());
CHECK(caller->IsSynchronized());
ObjPtr<mirror::Object> lock;
- if (self->GetManagedStack()->GetTopQuickFrameTag()) {
+ if (self->GetManagedStack()->GetTopQuickFrameGenericJniTag()) {
// Generic JNI.
lock = GetGenericJniSynchronizationObject(self, caller);
} else if (caller->IsStatic()) {
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 6cb5021..b88ebaf 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -95,6 +95,13 @@
const InstructionSetFeatures* instruction_set_features =
compiler_options.GetInstructionSetFeatures();
+ // When walking the stack the top frame doesn't have a pc associated with it. We then depend on
+ // the invariant that we don't have JITed code when AOT code is available. In debuggable runtimes
+ // this invariant doesn't hold. So we tag the SP for JITed code to identify whether we are executing
+ // JITed code or AOT code. Since tagging involves additional instructions we tag only in
+ // debuggable runtimes.
+ bool should_tag_sp = compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
+
// i.e. if the method was annotated with @FastNative
const bool is_fast_native = (access_flags & kAccFastNative) != 0u;
@@ -219,7 +226,7 @@
// because garbage collections are disabled within the execution of a
// @CriticalNative method.
if (LIKELY(!is_critical_native)) {
- __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
+ __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>(), should_tag_sp);
}
// 2. Lock the object (if synchronized) and transition out of Runnable (if normal native).
@@ -605,7 +612,7 @@
if (reference_return) {
// Suspend check entry point overwrites top of managed stack and leaves it clobbered.
// We need to restore the top for subsequent runtime call to `JniDecodeReferenceResult()`.
- __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
+ __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>(), should_tag_sp);
}
if (reference_return && main_out_arg_size != 0) {
__ IncreaseFrameSize(main_out_arg_size);
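[Editor's sketch] The tag stored above is meant to be consumed when walking the stack: a saved top-of-managed-stack value with bit 0x2 set was stored by JITed code in a debuggable runtime. A minimal illustration of that check, assuming only the 0x2 value OR'ed in by the assembler changes further down in this patch; the helper names are illustrative and not ART's actual stack-walker API:

#include <cstdint>

constexpr uintptr_t kJitSpTag = 0x2;  // Matches the value OR'ed into SP by the JNI stubs below.

// Returns true if the stored top-of-managed-stack value was tagged by JITed code.
inline bool StoredSpHasJitTag(uintptr_t stored_sp) {
  return (stored_sp & kJitSpTag) != 0;
}

// Strips the tag so the value can be used as a real stack pointer again.
inline uintptr_t StripJitSpTag(uintptr_t stored_sp) {
  return stored_sp & ~kJitSpTag;
}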
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index d81a7b5..b09219a 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -33,6 +33,7 @@
#include "graph_visualizer.h"
#include "locations.h"
#include "nodes.h"
+#include "oat_quick_method_header.h"
#include "optimizing_compiler_stats.h"
#include "read_barrier_option.h"
#include "stack.h"
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 6e6d40d..61151fe 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -428,8 +428,15 @@
asm_.StoreToOffset(kStoreWord, scratch, tr, thr_offs.Int32Value());
}
-void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
- asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
+void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) {
+ if (tag_sp) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ vixl32::Register reg = temps.Acquire();
+ ___ Orr(reg, sp, 0x2);
+ asm_.StoreToOffset(kStoreWord, reg, tr, thr_offs.Int32Value());
+ } else {
+ asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
+ }
}
void ArmVIXLJNIMacroAssembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index ed453ae..980de41 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -70,7 +70,7 @@
void StoreStackOffsetToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) override;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 50ca468..323a01e 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -218,10 +218,13 @@
___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}
-void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs, bool tag_sp) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
Register scratch = temps.AcquireX();
___ Mov(scratch, reg_x(SP));
+ if (tag_sp) {
+ ___ Orr(scratch, scratch, 0x2);
+ }
___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index 2c04184..daea95d 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -72,7 +72,7 @@
void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
void StoreStackOffsetToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
// Load routines.
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index b2d4dcd..f867a05 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -159,7 +159,8 @@
__ StoreRef(FrameOffset(48), scratch_register);
__ StoreSpanning(FrameOffset(48), method_register, FrameOffset(48));
__ StoreStackOffsetToThread(ThreadOffset32(512), FrameOffset(4096));
- __ StoreStackPointerToThread(ThreadOffset32(512));
+ __ StoreStackPointerToThread(ThreadOffset32(512), false);
+ __ StoreStackPointerToThread(ThreadOffset32(512), true);
// Other
__ Call(method_register, FrameOffset(48));
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index b6c6025..dac21ae 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -41,46 +41,46 @@
" 7e: 0d f5 80 5c add.w r12, sp, #4096\n"
" 82: c9 f8 00 c2 str.w r12, [r9, #512]\n"
" 86: c9 f8 00 d2 str.w sp, [r9, #512]\n"
- " 8a: d0 f8 30 e0 ldr.w lr, [r0, #48]\n"
- " 8e: f0 47 blx lr\n"
- " 90: dd f8 2c c0 ldr.w r12, [sp, #44]\n"
- " 94: cd f8 30 c0 str.w r12, [sp, #48]\n"
- " 98: d9 f8 00 c2 ldr.w r12, [r9, #512]\n"
- " 9c: cd f8 2c c0 str.w r12, [sp, #44]\n"
- " a0: dd f8 2c c0 ldr.w r12, [sp, #44]\n"
- " a4: cd f8 30 c0 str.w r12, [sp, #48]\n"
- " a8: 48 46 mov r0, r9\n"
- " aa: cd f8 30 90 str.w r9, [sp, #48]\n"
- " ae: 04 46 mov r4, r0\n"
- " b0: 0d f1 30 0c add.w r12, sp, #48\n"
- " b4: bb f1 00 0f cmp.w r11, #0\n"
- " b8: 18 bf it ne\n"
- " ba: e3 46 movne r11, r12\n"
- " bc: 0d f1 30 0b add.w r11, sp, #48\n"
- " c0: 5f ea 0b 00 movs.w r0, r11\n"
- " c4: 18 bf it ne\n"
- " c6: 0c a8 addne r0, sp, #48\n"
- " c8: dd f8 40 c0 ldr.w r12, [sp, #64]\n"
- " cc: bc f1 00 0f cmp.w r12, #0\n"
- " d0: 18 bf it ne\n"
- " d2: 0d f1 40 0c addne.w r12, sp, #64\n"
- " d6: cd f8 30 c0 str.w r12, [sp, #48]\n"
- " da: 5f ea 0b 00 movs.w r0, r11\n"
- " de: 18 bf it ne\n"
- " e0: 00 a8 addne r0, sp, #0\n"
- " e2: 0d f2 04 40 addw r0, sp, #1028\n"
- " e6: bb f1 00 0f cmp.w r11, #0\n"
- " ea: 08 bf it eq\n"
- " ec: 58 46 moveq r0, r11\n"
- " ee: 0d f2 04 4c addw r12, sp, #1028\n"
- " f2: bb f1 00 0f cmp.w r11, #0\n"
- " f6: 18 bf it ne\n"
- " f8: e3 46 movne r11, r12\n"
- " fa: d9 f8 94 c0 ldr.w r12, [r9, #148]\n"
- " fe: bc f1 00 0f cmp.w r12, #0\n"
- " 102: 71 d1 bne 0x1e8 @ imm = #226\n"
- " 104: cd f8 ff c7 str.w r12, [sp, #2047]\n"
- " 108: cd f8 ff c7 str.w r12, [sp, #2047]\n"
+ " 8a: 4d f0 02 0c orr r12, sp, #2\n"
+ " 8e: c9 f8 00 c2 str.w r12, [r9, #512]\n"
+ " 92: d0 f8 30 e0 ldr.w lr, [r0, #48]\n"
+ " 96: f0 47 blx lr\n"
+ " 98: dd f8 2c c0 ldr.w r12, [sp, #44]\n"
+ " 9c: cd f8 30 c0 str.w r12, [sp, #48]\n"
+ " a0: d9 f8 00 c2 ldr.w r12, [r9, #512]\n"
+ " a4: cd f8 2c c0 str.w r12, [sp, #44]\n"
+ " a8: dd f8 2c c0 ldr.w r12, [sp, #44]\n"
+ " ac: cd f8 30 c0 str.w r12, [sp, #48]\n"
+ " b0: 48 46 mov r0, r9\n"
+ " b2: cd f8 30 90 str.w r9, [sp, #48]\n"
+ " b6: 04 46 mov r4, r0\n"
+ " b8: 0d f1 30 0c add.w r12, sp, #48\n"
+ " bc: bb f1 00 0f cmp.w r11, #0\n"
+ " c0: 18 bf it ne\n"
+ " c2: e3 46 movne r11, r12\n"
+ " c4: 0d f1 30 0b add.w r11, sp, #48\n"
+ " c8: 5f ea 0b 00 movs.w r0, r11\n"
+ " cc: 18 bf it ne\n"
+ " ce: 0c a8 addne r0, sp, #48\n"
+ " d0: dd f8 40 c0 ldr.w r12, [sp, #64]\n"
+ " d4: bc f1 00 0f cmp.w r12, #0\n"
+ " d8: 18 bf it ne\n"
+ " da: 0d f1 40 0c addne.w r12, sp, #64\n"
+ " de: cd f8 30 c0 str.w r12, [sp, #48]\n"
+ " e2: 5f ea 0b 00 movs.w r0, r11\n"
+ " e6: 18 bf it ne\n"
+ " e8: 00 a8 addne r0, sp, #0\n"
+ " ea: 0d f2 04 40 addw r0, sp, #1028\n"
+ " ee: bb f1 00 0f cmp.w r11, #0\n"
+ " f2: 08 bf it eq\n"
+ " f4: 58 46 moveq r0, r11\n"
+ " f6: 0d f2 04 4c addw r12, sp, #1028\n"
+ " fa: bb f1 00 0f cmp.w r11, #0\n"
+ " fe: 18 bf it ne\n"
+ " 100: e3 46 movne r11, r12\n"
+ " 102: d9 f8 9c c0 ldr.w r12, [r9, #156]\n"
+ " 106: bc f1 00 0f cmp.w r12, #0\n"
+ " 10a: 71 d1 bne 0x1f0 @ imm = #226\n"
" 10c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 110: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 114: cd f8 ff c7 str.w r12, [sp, #2047]\n"
@@ -135,26 +135,28 @@
" 1d8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 1dc: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 1e0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
- " 1e4: 00 f0 02 b8 b.w 0x1ec @ imm = #4\n"
- " 1e8: 00 f0 1b b8 b.w 0x222 @ imm = #54\n"
- " 1ec: cd f8 ff c7 str.w r12, [sp, #2047]\n"
- " 1f0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
+ " 1e4: cd f8 ff c7 str.w r12, [sp, #2047]\n"
+ " 1e8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
+ " 1ec: 00 f0 02 b8 b.w 0x1f4 @ imm = #4\n"
+ " 1f0: 00 f0 1b b8 b.w 0x22a @ imm = #54\n"
" 1f4: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 1f8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 1fc: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 200: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 204: cd f8 ff c7 str.w r12, [sp, #2047]\n"
" 208: cd f8 ff c7 str.w r12, [sp, #2047]\n"
- " 20c: 0d f5 80 5d add.w sp, sp, #4096\n"
- " 210: 08 b0 add sp, #32\n"
- " 212: 01 b0 add sp, #4\n"
- " 214: bd ec 10 8a vpop {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
- " 218: bd e8 e0 4d pop.w {r5, r6, r7, r8, r10, r11, lr}\n"
- " 21c: d9 f8 24 80 ldr.w r8, [r9, #36]\n"
- " 220: 70 47 bx lr\n"
- " 222: d9 f8 94 00 ldr.w r0, [r9, #148]\n"
- " 226: d9 f8 c8 e2 ldr.w lr, [r9, #712]\n"
- " 22a: f0 47 blx lr\n"
+ " 20c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
+ " 210: cd f8 ff c7 str.w r12, [sp, #2047]\n"
+ " 214: 0d f5 80 5d add.w sp, sp, #4096\n"
+ " 218: 08 b0 add sp, #32\n"
+ " 21a: 01 b0 add sp, #4\n"
+ " 21c: bd ec 10 8a vpop {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
+ " 220: bd e8 e0 4d pop.w {r5, r6, r7, r8, r10, r11, lr}\n"
+ " 224: d9 f8 24 80 ldr.w r8, [r9, #36]\n"
+ " 228: 70 47 bx lr\n"
+ " 22a: d9 f8 9c 00 ldr.w r0, [r9, #156]\n"
+ " 22e: d9 f8 d0 e2 ldr.w lr, [r9, #720]\n"
+ " 232: f0 47 blx lr\n"
};
const char* const VixlLoadFromOffsetResults = {
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 7022e3d..c8c713a 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -126,7 +126,11 @@
virtual void StoreStackOffsetToThread(ThreadOffset<kPointerSize> thr_offs,
FrameOffset fr_offs) = 0;
- virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0;
+ // Stores the stack pointer, tagging it if required so that we can walk the stack. In debuggable
+ // runtimes the tag tells us whether we are executing JITed code or AOT code. In non-debuggable
+ // runtimes we never use JITed code when AOT code is present, so checking for AOT code is enough
+ // to detect which code is being executed. We avoid tagging there to reduce instructions.
+ virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs, bool tag_sp) = 0;
virtual void StoreSpanning(FrameOffset dest,
ManagedRegister src,
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 685f5f1..55d5428 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -187,8 +187,18 @@
__ fs()->movl(Address::Absolute(thr_offs), scratch);
}
-void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
- __ fs()->movl(Address::Absolute(thr_offs), ESP);
+void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) {
+ if (tag_sp) {
+ // There is no free register; spill ECX's contents below the stack pointer and restore them later.
+ Register scratch = ECX;
+ __ movl(Address(ESP, -32), scratch);
+ __ movl(scratch, ESP);
+ __ orl(scratch, Immediate(0x2));
+ __ fs()->movl(Address::Absolute(thr_offs), scratch);
+ __ movl(scratch, Address(ESP, -32));
+ } else {
+ __ fs()->movl(Address::Absolute(thr_offs), ESP);
+ }
}
void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 29fccfd..f8ce38b 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -66,7 +66,7 @@
void StoreStackOffsetToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) override;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index d5d1bba..adc431f 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -217,8 +217,15 @@
__ gs()->movq(Address::Absolute(thr_offs, true), scratch);
}
-void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
- __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
+void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) {
+ if (tag_sp) {
+ CpuRegister reg = GetScratchRegister();
+ __ movq(reg, CpuRegister(RSP));
+ __ orq(reg, Immediate(0x2));
+ __ gs()->movq(Address::Absolute(thr_offs, true), reg);
+ } else {
+ __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
+ }
}
void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index e080f0b..feaf27e 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -67,7 +67,7 @@
void StoreStackOffsetToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
diff --git a/dex2oat/art_standalone_dex2oat_tests.xml b/dex2oat/art_standalone_dex2oat_tests.xml
index 32346f2..1707887 100644
--- a/dex2oat/art_standalone_dex2oat_tests.xml
+++ b/dex2oat/art_standalone_dex2oat_tests.xml
@@ -77,6 +77,8 @@
<option name="mainline-module-package-name" value="com.android.art" />
</object>
+ <!-- Skip on HWASan. TODO(b/230394041): Re-enable -->
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.SkipHWASanModuleController" />
<!-- Only run tests if the device under test is SDK version 31 (Android 12) or above. -->
<object type="module_controller" class="com.android.tradefed.testtype.suite.module.Sdk31ModuleController" />
</configuration>
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index f937523..80c63c6 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -70,6 +70,7 @@
// ZipArchive support, the order matters here to get all symbols.
"libziparchive",
],
+ whole_static_libs: ["libtinyxml2"],
shared_libs: [
"libz",
"liblog",
@@ -88,6 +89,7 @@
static: {
cflags: ["-DART_STATIC_LIBARTBASE"],
},
+ whole_static_libs: ["libtinyxml2"],
shared_libs: [
"libziparchive",
"libz",
@@ -112,6 +114,7 @@
// For common macros.
"libbase",
],
+ whole_static_libs: ["libtinyxml2"],
export_static_lib_headers: ["libbase"], // ART's macros.h depends on libbase's macros.h.
cflags: ["-Wno-thread-safety"],
@@ -391,6 +394,8 @@
shared_libs: ["libbase"],
export_shared_lib_headers: ["libbase"],
+ whole_static_libs: ["libtinyxml2"],
+
apex_available: [
"com.android.art",
"com.android.art.debug",
diff --git a/libartbase/base/flags.h b/libartbase/base/flags.h
index d1e1ca6..fc568b2 100644
--- a/libartbase/base/flags.h
+++ b/libartbase/base/flags.h
@@ -304,6 +304,12 @@
// Note that the actual write is still controlled by
// MetricsReportingMods and MetricsReportingNumMods.
Flag<std::string> MetricsWriteToFile{"metrics.write-to-file", "", FlagType::kCmdlineOnly};
+
+ // The output format for metrics. This is only used
+ // when writing metrics to a file; metrics written
+ // to logcat will be in human-readable text format.
+ // Supported values are "text" and "xml".
+ Flag<std::string> MetricsFormat{"metrics.format", "text", FlagType::kCmdlineOnly};
};
// This is the actual instance of all the flags.
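[Editor's sketch] The new metrics.format flag accepts only "text" and "xml". A hedged sketch of how a caller might map the flag value onto the formatter hierarchy added to metrics.h further down in this patch; MakeFormatter is a hypothetical helper, not part of this change, and the enclosing namespace is omitted:

#include <memory>
#include <string>

// Hypothetical helper; assumes the MetricsFormatter/TextFormatter/XmlFormatter
// declarations introduced in libartbase/base/metrics/metrics.h below.
std::unique_ptr<MetricsFormatter> MakeFormatter(const std::string& format) {
  if (format == "xml") {
    return std::make_unique<XmlFormatter>();
  }
  return std::make_unique<TextFormatter>();  // "text" is the documented default.
}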
diff --git a/libartbase/base/metrics/metrics.h b/libartbase/base/metrics/metrics.h
index d6f2463..4a1664a 100644
--- a/libartbase/base/metrics/metrics.h
+++ b/libartbase/base/metrics/metrics.h
@@ -30,6 +30,7 @@
#include "android-base/logging.h"
#include "base/bit_utils.h"
#include "base/time_utils.h"
+#include "tinyxml2.h"
#pragma clang diagnostic push
#pragma clang diagnostic error "-Wconversion"
@@ -129,7 +130,7 @@
}
#undef REASON_NAME
-#undef ReasonFromName
+#undef REASON_FROM_NAME
#define COMPILER_FILTER_REPORTING_LIST(V) \
V(kError, "error") /* Error (invalid value) condition */ \
@@ -433,12 +434,80 @@
friend class ArtMetrics;
};
-// A backend that writes metrics in a human-readable format to a string.
+// Base class for formatting metrics into different formats
+// (human-readable text, JSON, etc.)
+class MetricsFormatter {
+ public:
+ virtual ~MetricsFormatter() = default;
+
+ virtual void FormatBeginReport(uint64_t timestamp_since_start_ms,
+ const std::optional<SessionData>& session_data) = 0;
+ virtual void FormatEndReport() = 0;
+ virtual void FormatReportCounter(DatumId counter_type, uint64_t value) = 0;
+ virtual void FormatReportHistogram(DatumId histogram_type,
+ int64_t low_value,
+ int64_t high_value,
+ const std::vector<uint32_t>& buckets) = 0;
+ virtual std::string GetAndResetBuffer() = 0;
+
+ protected:
+ const std::string version = "1.0";
+};
+
+// Formatter outputting metrics in human-readable text format
+class TextFormatter : public MetricsFormatter {
+ public:
+ TextFormatter() = default;
+
+ void FormatBeginReport(uint64_t timestamp_millis,
+ const std::optional<SessionData>& session_data) override;
+
+ void FormatReportCounter(DatumId counter_type, uint64_t value) override;
+
+ void FormatReportHistogram(DatumId histogram_type,
+ int64_t low_value,
+ int64_t high_value,
+ const std::vector<uint32_t>& buckets) override;
+
+ void FormatEndReport() override;
+
+ std::string GetAndResetBuffer() override;
+
+ private:
+ std::ostringstream os_;
+};
+
+// Formatter outputting metrics in XML format
+class XmlFormatter : public MetricsFormatter {
+ public:
+ XmlFormatter() = default;
+
+ void FormatBeginReport(uint64_t timestamp_millis,
+ const std::optional<SessionData>& session_data) override;
+
+ void FormatReportCounter(DatumId counter_type, uint64_t value) override;
+
+ void FormatReportHistogram(DatumId histogram_type,
+ int64_t low_value,
+ int64_t high_value,
+ const std::vector<uint32_t>& buckets) override;
+
+ void FormatEndReport() override;
+
+ std::string GetAndResetBuffer() override;
+
+ private:
+ tinyxml2::XMLDocument document_;
+};
+
+// A backend that writes metrics to a string.
+// The format of the metrics' output is delegated
+// to the MetricsFormatter class.
//
// This is used as a base for LogBackend and FileBackend.
class StringBackend : public MetricsBackend {
public:
- StringBackend();
+ explicit StringBackend(std::unique_ptr<MetricsFormatter> formatter);
void BeginOrUpdateSession(const SessionData& session_data) override;
@@ -456,14 +525,15 @@
std::string GetAndResetBuffer();
private:
- std::ostringstream os_;
+ std::unique_ptr<MetricsFormatter> formatter_;
std::optional<SessionData> session_data_;
};
// A backend that writes metrics in human-readable format to the log (i.e. logcat).
class LogBackend : public StringBackend {
public:
- explicit LogBackend(android::base::LogSeverity level);
+ explicit LogBackend(std::unique_ptr<MetricsFormatter> formatter,
+ android::base::LogSeverity level);
void BeginReport(uint64_t timestamp_millis) override;
void EndReport() override;
@@ -473,12 +543,10 @@
};
// A backend that writes metrics to a file.
-//
-// These are currently written in the same human-readable format used by StringBackend and
-// LogBackend, but we will probably want a more machine-readable format in the future.
class FileBackend : public StringBackend {
public:
- explicit FileBackend(const std::string& filename);
+ explicit FileBackend(std::unique_ptr<MetricsFormatter> formatter,
+ const std::string& filename);
void BeginReport(uint64_t timestamp_millis) override;
void EndReport() override;
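[Editor's sketch] MetricsFormatter is the extension point here: a new output format only requires another subclass implementing the five virtuals declared above. A sketch under that assumption (the CSV format and the class name are made up for illustration; includes and the enclosing namespace are omitted):

class CsvFormatter : public MetricsFormatter {
 public:
  void FormatBeginReport(uint64_t timestamp_since_start_ms,
                         const std::optional<SessionData>& session_data) override {
    os_ << "timestamp_since_start_ms," << timestamp_since_start_ms << "\n";
    if (session_data.has_value()) {
      os_ << "session_id," << session_data->session_id << "\n";
    }
  }

  void FormatReportCounter(DatumId counter_type, uint64_t value) override {
    os_ << DatumName(counter_type) << ",count," << value << "\n";
  }

  void FormatReportHistogram(DatumId histogram_type,
                             int64_t low_value,
                             int64_t high_value,
                             const std::vector<uint32_t>& buckets) override {
    os_ << DatumName(histogram_type) << ",histogram," << low_value << "," << high_value;
    for (uint32_t count : buckets) {
      os_ << "," << count;
    }
    os_ << "\n";
  }

  void FormatEndReport() override {}

  std::string GetAndResetBuffer() override {
    std::string result = os_.str();
    os_.clear();
    os_.str("");
    return result;
  }

 private:
  std::ostringstream os_;
};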
diff --git a/libartbase/base/metrics/metrics_common.cc b/libartbase/base/metrics/metrics_common.cc
index f09987b..025f5eb 100644
--- a/libartbase/base/metrics/metrics_common.cc
+++ b/libartbase/base/metrics/metrics_common.cc
@@ -76,7 +76,7 @@
}
void ArtMetrics::DumpForSigQuit(std::ostream& os) const {
- StringBackend backend;
+ StringBackend backend(std::make_unique<TextFormatter>());
ReportAllMetrics(&backend);
os << backend.GetAndResetBuffer();
}
@@ -88,13 +88,12 @@
#undef ART_METRIC
}
-StringBackend::StringBackend() {}
+StringBackend::StringBackend(std::unique_ptr<MetricsFormatter> formatter)
+ : formatter_(std::move(formatter))
+{}
std::string StringBackend::GetAndResetBuffer() {
- std::string result = os_.str();
- os_.clear();
- os_.str("");
- return result;
+ return formatter_->GetAndResetBuffer();
}
void StringBackend::BeginOrUpdateSession(const SessionData& session_data) {
@@ -102,32 +101,51 @@
}
void StringBackend::BeginReport(uint64_t timestamp_since_start_ms) {
- os_ << "\n*** ART internal metrics ***\n";
- os_ << " Metadata:\n";
- os_ << " timestamp_since_start_ms: " << timestamp_since_start_ms << "\n";
- if (session_data_.has_value()) {
- os_ << " session_id: " << session_data_->session_id << "\n";
- os_ << " uid: " << session_data_->uid << "\n";
- os_ << " compilation_reason: " << CompilationReasonName(session_data_->compilation_reason)
- << "\n";
- os_ << " compiler_filter: " << CompilerFilterReportingName(session_data_->compiler_filter)
- << "\n";
- }
- os_ << " Metrics:\n";
+ formatter_->FormatBeginReport(timestamp_since_start_ms, session_data_);
}
-void StringBackend::EndReport() { os_ << "*** Done dumping ART internal metrics ***\n"; }
+void StringBackend::EndReport() {
+ formatter_->FormatEndReport();
+}
void StringBackend::ReportCounter(DatumId counter_type, uint64_t value) {
- os_ << " " << DatumName(counter_type) << ": count = " << value << "\n";
+ formatter_->FormatReportCounter(counter_type, value);
}
void StringBackend::ReportHistogram(DatumId histogram_type,
int64_t minimum_value_,
int64_t maximum_value_,
const std::vector<uint32_t>& buckets) {
- os_ << " " << DatumName(histogram_type) << ": range = " << minimum_value_ << "..." << maximum_value_;
- if (buckets.size() > 0) {
+ formatter_->FormatReportHistogram(histogram_type, minimum_value_, maximum_value_, buckets);
+}
+
+void TextFormatter::FormatBeginReport(uint64_t timestamp_since_start_ms,
+ const std::optional<SessionData>& session_data) {
+ os_ << "\n*** ART internal metrics ***\n";
+ os_ << " Metadata:\n";
+ os_ << " timestamp_since_start_ms: " << timestamp_since_start_ms << "\n";
+ if (session_data.has_value()) {
+ os_ << " session_id: " << session_data->session_id << "\n";
+ os_ << " uid: " << session_data->uid << "\n";
+ os_ << " compilation_reason: " << CompilationReasonName(session_data->compilation_reason)
+ << "\n";
+ os_ << " compiler_filter: " << CompilerFilterReportingName(session_data->compiler_filter)
+ << "\n";
+ }
+ os_ << " Metrics:\n";
+}
+
+void TextFormatter::FormatReportCounter(DatumId counter_type, uint64_t value) {
+ os_ << " " << DatumName(counter_type) << ": count = " << value << "\n";
+}
+
+void TextFormatter::FormatReportHistogram(DatumId histogram_type,
+ int64_t minimum_value_,
+ int64_t maximum_value_,
+ const std::vector<uint32_t>& buckets) {
+ os_ << " " << DatumName(histogram_type) << ": range = "
+ << minimum_value_ << "..." << maximum_value_;
+ if (!buckets.empty()) {
os_ << ", buckets: ";
bool first = true;
for (const auto& count : buckets) {
@@ -143,22 +161,100 @@
}
}
-LogBackend::LogBackend(android::base::LogSeverity level) : level_{level} {}
+void TextFormatter::FormatEndReport() {
+ os_ << "*** Done dumping ART internal metrics ***\n";
+}
+
+std::string TextFormatter::GetAndResetBuffer() {
+ std::string result = os_.str();
+ os_.clear();
+ os_.str("");
+ return result;
+}
+
+void XmlFormatter::FormatBeginReport(uint64_t timestamp_millis,
+ const std::optional<SessionData>& session_data) {
+ tinyxml2::XMLElement* art_runtime_metrics = document_.NewElement("art_runtime_metrics");
+ document_.InsertEndChild(art_runtime_metrics);
+
+ art_runtime_metrics->InsertNewChildElement("version")->SetText(version.data());
+
+ tinyxml2::XMLElement* metadata = art_runtime_metrics->InsertNewChildElement("metadata");
+ metadata->InsertNewChildElement("timestamp_since_start_ms")->SetText(timestamp_millis);
+
+ if (session_data.has_value()) {
+ metadata->InsertNewChildElement("session_id")->SetText(session_data->session_id);
+ metadata->InsertNewChildElement("uid")->SetText(session_data->uid);
+ metadata
+ ->InsertNewChildElement("compilation_reason")
+ ->SetText(CompilationReasonName(session_data->compilation_reason));
+ metadata
+ ->InsertNewChildElement("compiler_filter")
+ ->SetText(CompilerFilterReportingName(session_data->compiler_filter));
+ }
+
+ art_runtime_metrics->InsertNewChildElement("metrics");
+}
+
+void XmlFormatter::FormatReportCounter(DatumId counter_type, uint64_t value) {
+ tinyxml2::XMLElement* metrics = document_.RootElement()->FirstChildElement("metrics");
+
+ tinyxml2::XMLElement* counter = metrics->InsertNewChildElement(DatumName(counter_type).data());
+ counter->InsertNewChildElement("counter_type")->SetText("count");
+ counter->InsertNewChildElement("value")->SetText(value);
+}
+
+void XmlFormatter::FormatReportHistogram(DatumId histogram_type,
+ int64_t low_value,
+ int64_t high_value,
+ const std::vector<uint32_t>& buckets) {
+ tinyxml2::XMLElement* metrics = document_.RootElement()->FirstChildElement("metrics");
+
+ tinyxml2::XMLElement* histogram =
+ metrics->InsertNewChildElement(DatumName(histogram_type).data());
+ histogram->InsertNewChildElement("counter_type")->SetText("histogram");
+ histogram->InsertNewChildElement("minimum_value")->SetText(low_value);
+ histogram->InsertNewChildElement("maximum_value")->SetText(high_value);
+
+ tinyxml2::XMLElement* buckets_element = histogram->InsertNewChildElement("buckets");
+ for (const auto& count : buckets) {
+ buckets_element->InsertNewChildElement("bucket")->SetText(count);
+ }
+}
+
+void XmlFormatter::FormatEndReport() {}
+
+std::string XmlFormatter::GetAndResetBuffer() {
+ tinyxml2::XMLPrinter printer(/*file=*/nullptr, /*compact=*/true);
+ document_.Print(&printer);
+ std::string result = printer.CStr();
+ document_.Clear();
+
+ return result;
+}
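
As a rough sketch of how the XML report could be consumed (the tinyxml2 calls are real library API; the counting helper itself is illustrative and not part of the patch):

#include <string>
#include <tinyxml2.h>

// Illustrative only: parse the string returned by XmlFormatter::GetAndResetBuffer()
// and count how many metrics were reported.
int CountReportedMetrics(const std::string& xml) {
  tinyxml2::XMLDocument doc;
  if (doc.Parse(xml.c_str()) != tinyxml2::XML_SUCCESS) {
    return -1;  // Malformed report.
  }
  tinyxml2::XMLElement* root = doc.FirstChildElement("art_runtime_metrics");
  tinyxml2::XMLElement* metrics =
      (root != nullptr) ? root->FirstChildElement("metrics") : nullptr;
  if (metrics == nullptr) {
    return -1;
  }
  int count = 0;
  // Each child of <metrics> is one counter or histogram, named after its DatumName.
  for (tinyxml2::XMLElement* metric = metrics->FirstChildElement();
       metric != nullptr;
       metric = metric->NextSiblingElement()) {
    ++count;
  }
  return count;
}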
+
+LogBackend::LogBackend(std::unique_ptr<MetricsFormatter> formatter,
+ android::base::LogSeverity level)
+ : StringBackend{std::move(formatter)}, level_{level}
+{}
void LogBackend::BeginReport(uint64_t timestamp_since_start_ms) {
- GetAndResetBuffer();
+ StringBackend::GetAndResetBuffer();
StringBackend::BeginReport(timestamp_since_start_ms);
}
void LogBackend::EndReport() {
StringBackend::EndReport();
- LOG_STREAM(level_) << GetAndResetBuffer();
+ LOG_STREAM(level_) << StringBackend::GetAndResetBuffer();
}
-FileBackend::FileBackend(const std::string& filename) : filename_{filename} {}
+FileBackend::FileBackend(std::unique_ptr<MetricsFormatter> formatter,
+ const std::string& filename)
+ : StringBackend{std::move(formatter)}, filename_{filename}
+{}
void FileBackend::BeginReport(uint64_t timestamp_since_start_ms) {
- GetAndResetBuffer();
+ StringBackend::GetAndResetBuffer();
StringBackend::BeginReport(timestamp_since_start_ms);
}
@@ -170,7 +266,7 @@
if (file.get() == nullptr) {
       LOG(WARNING) << "Could not open metrics file '" << filename_ << "': " << error_message;
} else {
- if (!android::base::WriteStringToFd(GetAndResetBuffer(), file.get()->Fd())) {
+ if (!android::base::WriteStringToFd(StringBackend::GetAndResetBuffer(), file.get()->Fd())) {
PLOG(WARNING) << "Error writing metrics to file";
}
}
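
A minimal construction sketch, assuming the new constructor shapes above (namespace qualifications and the file path are illustrative): each backend now owns the MetricsFormatter that decides its output format.

#include <memory>

void MakeBackendsExample() {
  // Text report sent to the log at INFO severity.
  art::metrics::LogBackend log_backend(std::make_unique<art::metrics::TextFormatter>(),
                                       ::android::base::INFO);
  // XML report flushed to a file; the path here is purely illustrative.
  art::metrics::FileBackend file_backend(std::make_unique<art::metrics::XmlFormatter>(),
                                         "/data/local/tmp/art-metrics.xml");
  (void)log_backend;
  (void)file_backend;
}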
diff --git a/libartbase/base/metrics/metrics_test.cc b/libartbase/base/metrics/metrics_test.cc
index ed6f88d..2820290 100644
--- a/libartbase/base/metrics/metrics_test.cc
+++ b/libartbase/base/metrics/metrics_test.cc
@@ -16,6 +16,7 @@
#include "metrics.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "metrics_test.h"
@@ -248,7 +249,7 @@
// Makes sure all defined metrics are included when dumping through StreamBackend.
TEST_F(MetricsTest, StreamBackendDumpAllMetrics) {
ArtMetrics metrics;
- StringBackend backend;
+ StringBackend backend(std::make_unique<TextFormatter>());
metrics.ReportAllMetrics(&backend);
@@ -305,6 +306,266 @@
metrics.ReportAllMetrics(&zero_backend);
}
+TEST(TextFormatterTest, ReportMetrics_WithBuckets) {
+ TextFormatter text_formatter;
+ SessionData session_data {
+ .session_id = 1000,
+ .uid = 50,
+ .compilation_reason = CompilationReason::kInstall,
+ .compiler_filter = CompilerFilterReporting::kSpeed,
+ };
+
+ text_formatter.FormatBeginReport(200, session_data);
+ text_formatter.FormatReportCounter(DatumId::kFullGcCount, 1u);
+ text_formatter.FormatReportHistogram(DatumId::kFullGcCollectionTime,
+ 50,
+ 200,
+ {2, 4, 7, 1});
+ text_formatter.FormatEndReport();
+
+ const std::string result = text_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "\n*** ART internal metrics ***\n"
+ " Metadata:\n"
+ " timestamp_since_start_ms: 200\n"
+ " session_id: 1000\n"
+ " uid: 50\n"
+ " compilation_reason: install\n"
+ " compiler_filter: speed\n"
+ " Metrics:\n"
+ " FullGcCount: count = 1\n"
+ " FullGcCollectionTime: range = 50...200, buckets: 2,4,7,1\n"
+ "*** Done dumping ART internal metrics ***\n");
+}
+
+TEST(TextFormatterTest, ReportMetrics_NoBuckets) {
+ TextFormatter text_formatter;
+ SessionData session_data {
+ .session_id = 500,
+ .uid = 15,
+ .compilation_reason = CompilationReason::kCmdLine,
+ .compiler_filter = CompilerFilterReporting::kExtract,
+ };
+
+ text_formatter.FormatBeginReport(400, session_data);
+ text_formatter.FormatReportHistogram(DatumId::kFullGcCollectionTime, 10, 20, {});
+ text_formatter.FormatEndReport();
+
+ std::string result = text_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "\n*** ART internal metrics ***\n"
+ " Metadata:\n"
+ " timestamp_since_start_ms: 400\n"
+ " session_id: 500\n"
+ " uid: 15\n"
+ " compilation_reason: cmdline\n"
+ " compiler_filter: extract\n"
+ " Metrics:\n"
+ " FullGcCollectionTime: range = 10...20, no buckets\n"
+ "*** Done dumping ART internal metrics ***\n");
+}
+
+TEST(TextFormatterTest, BeginReport_NoSessionData) {
+ TextFormatter text_formatter;
+ std::optional<SessionData> empty_session_data;
+
+ text_formatter.FormatBeginReport(100, empty_session_data);
+ text_formatter.FormatEndReport();
+
+ std::string result = text_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "\n*** ART internal metrics ***\n"
+ " Metadata:\n"
+ " timestamp_since_start_ms: 100\n"
+ " Metrics:\n"
+ "*** Done dumping ART internal metrics ***\n");
+}
+
+TEST(TextFormatterTest, GetAndResetBuffer_ActuallyResetsBuffer) {
+ TextFormatter text_formatter;
+ std::optional<SessionData> empty_session_data;
+
+ text_formatter.FormatBeginReport(200, empty_session_data);
+ text_formatter.FormatReportCounter(DatumId::kFullGcCount, 1u);
+ text_formatter.FormatEndReport();
+
+ std::string result = text_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "\n*** ART internal metrics ***\n"
+ " Metadata:\n"
+ " timestamp_since_start_ms: 200\n"
+ " Metrics:\n"
+ " FullGcCount: count = 1\n"
+ "*** Done dumping ART internal metrics ***\n");
+
+ text_formatter.FormatBeginReport(300, empty_session_data);
+ text_formatter.FormatReportCounter(DatumId::kFullGcCount, 5u);
+ text_formatter.FormatEndReport();
+
+ result = text_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "\n*** ART internal metrics ***\n"
+ " Metadata:\n"
+ " timestamp_since_start_ms: 300\n"
+ " Metrics:\n"
+ " FullGcCount: count = 5\n"
+ "*** Done dumping ART internal metrics ***\n");
+}
+
+TEST(XmlFormatterTest, ReportMetrics_WithBuckets) {
+ XmlFormatter xml_formatter;
+ SessionData session_data {
+ .session_id = 123,
+ .uid = 456,
+ .compilation_reason = CompilationReason::kFirstBoot,
+ .compiler_filter = CompilerFilterReporting::kSpace,
+ };
+
+ xml_formatter.FormatBeginReport(250, session_data);
+ xml_formatter.FormatReportCounter(DatumId::kYoungGcCount, 3u);
+ xml_formatter.FormatReportHistogram(DatumId::kYoungGcCollectionTime,
+ 300,
+ 600,
+ {1, 5, 3});
+ xml_formatter.FormatEndReport();
+
+ const std::string result = xml_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "<art_runtime_metrics>"
+ "<version>1.0</version>"
+ "<metadata>"
+ "<timestamp_since_start_ms>250</timestamp_since_start_ms>"
+ "<session_id>123</session_id>"
+ "<uid>456</uid>"
+ "<compilation_reason>first-boot</compilation_reason>"
+ "<compiler_filter>space</compiler_filter>"
+ "</metadata>"
+ "<metrics>"
+ "<YoungGcCount>"
+ "<counter_type>count</counter_type>"
+ "<value>3</value>"
+ "</YoungGcCount>"
+ "<YoungGcCollectionTime>"
+ "<counter_type>histogram</counter_type>"
+ "<minimum_value>300</minimum_value>"
+ "<maximum_value>600</maximum_value>"
+ "<buckets>"
+ "<bucket>1</bucket>"
+ "<bucket>5</bucket>"
+ "<bucket>3</bucket>"
+ "</buckets>"
+ "</YoungGcCollectionTime>"
+ "</metrics>"
+ "</art_runtime_metrics>");
+}
+
+TEST(XmlFormatterTest, ReportMetrics_NoBuckets) {
+ XmlFormatter xml_formatter;
+ SessionData session_data {
+ .session_id = 234,
+ .uid = 345,
+ .compilation_reason = CompilationReason::kFirstBoot,
+ .compiler_filter = CompilerFilterReporting::kSpace,
+ };
+
+ xml_formatter.FormatBeginReport(160, session_data);
+ xml_formatter.FormatReportCounter(DatumId::kYoungGcCount, 4u);
+ xml_formatter.FormatReportHistogram(DatumId::kYoungGcCollectionTime, 20, 40, {});
+ xml_formatter.FormatEndReport();
+
+ const std::string result = xml_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "<art_runtime_metrics>"
+ "<version>1.0</version>"
+ "<metadata>"
+ "<timestamp_since_start_ms>160</timestamp_since_start_ms>"
+ "<session_id>234</session_id>"
+ "<uid>345</uid>"
+ "<compilation_reason>first-boot</compilation_reason>"
+ "<compiler_filter>space</compiler_filter>"
+ "</metadata>"
+ "<metrics>"
+ "<YoungGcCount>"
+ "<counter_type>count</counter_type>"
+ "<value>4</value>"
+ "</YoungGcCount>"
+ "<YoungGcCollectionTime>"
+ "<counter_type>histogram</counter_type>"
+ "<minimum_value>20</minimum_value>"
+ "<maximum_value>40</maximum_value>"
+ "<buckets/>"
+ "</YoungGcCollectionTime>"
+ "</metrics>"
+ "</art_runtime_metrics>");
+}
+
+TEST(XmlFormatterTest, BeginReport_NoSessionData) {
+ XmlFormatter xml_formatter;
+ std::optional<SessionData> empty_session_data;
+
+ xml_formatter.FormatBeginReport(100, empty_session_data);
+ xml_formatter.FormatReportCounter(DatumId::kYoungGcCount, 3u);
+ xml_formatter.FormatEndReport();
+
+ std::string result = xml_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "<art_runtime_metrics>"
+ "<version>1.0</version>"
+ "<metadata>"
+ "<timestamp_since_start_ms>100</timestamp_since_start_ms>"
+ "</metadata>"
+ "<metrics>"
+ "<YoungGcCount>"
+ "<counter_type>count</counter_type>"
+ "<value>3</value>"
+ "</YoungGcCount>"
+ "</metrics>"
+ "</art_runtime_metrics>");
+}
+
+TEST(XmlFormatterTest, GetAndResetBuffer_ActuallyResetsBuffer) {
+ XmlFormatter xml_formatter;
+ std::optional<SessionData> empty_session_data;
+
+ xml_formatter.FormatBeginReport(200, empty_session_data);
+ xml_formatter.FormatReportCounter(DatumId::kFullGcCount, 1u);
+ xml_formatter.FormatEndReport();
+
+ std::string result = xml_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "<art_runtime_metrics>"
+ "<version>1.0</version>"
+ "<metadata>"
+ "<timestamp_since_start_ms>200</timestamp_since_start_ms>"
+ "</metadata>"
+ "<metrics>"
+ "<FullGcCount>"
+ "<counter_type>count</counter_type>"
+ "<value>1</value>"
+ "</FullGcCount>"
+ "</metrics>"
+ "</art_runtime_metrics>");
+
+ xml_formatter.FormatBeginReport(300, empty_session_data);
+ xml_formatter.FormatReportCounter(DatumId::kFullGcCount, 5u);
+ xml_formatter.FormatEndReport();
+
+ result = xml_formatter.GetAndResetBuffer();
+ ASSERT_EQ(result,
+ "<art_runtime_metrics>"
+ "<version>1.0</version>"
+ "<metadata>"
+ "<timestamp_since_start_ms>300</timestamp_since_start_ms>"
+ "</metadata>"
+ "<metrics>"
+ "<FullGcCount>"
+ "<counter_type>count</counter_type>"
+ "<value>5</value>"
+ "</FullGcCount>"
+ "</metrics>"
+ "</art_runtime_metrics>");
+}
+
TEST(CompilerFilterReportingTest, FromName) {
ASSERT_EQ(CompilerFilterReportingFromName("error"),
CompilerFilterReporting::kError);
diff --git a/libartservice/api/current.txt b/libartservice/api/current.txt
deleted file mode 100644
index c7844e0..0000000
--- a/libartservice/api/current.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-// Signature format: 2.0
-package com.android.server.art {
-
- public final class ArtManagerLocal {
- ctor public ArtManagerLocal();
- }
-
-}
-
diff --git a/libartservice/Android.bp b/libartservice/service/Android.bp
similarity index 74%
rename from libartservice/Android.bp
rename to libartservice/service/Android.bp
index 985a2eb..55b63b3 100644
--- a/libartservice/Android.bp
+++ b/libartservice/service/Android.bp
@@ -21,25 +21,29 @@
default_applicable_licenses: ["art_license"],
}
-cc_library {
- // This native library contains JNI support code for the ART Service Java
- // Language library.
-
- name: "libartservice",
+// This native library contains JNI support code for the ART Service Java
+// Language library.
+cc_defaults {
+ name: "libartservice_defaults",
defaults: ["art_defaults"],
host_supported: true,
srcs: [
- "service/native/service.cc",
+ "native/service.cc",
],
- export_include_dirs: ["."],
+ shared_libs: [
+ "libbase",
+ ],
+}
+
+cc_library {
+ name: "libartservice",
+ defaults: ["libartservice_defaults"],
apex_available: [
"com.android.art",
"com.android.art.debug",
],
shared_libs: [
- "libbase",
],
- export_shared_lib_headers: ["libbase"],
}
// Provides the API and implementation of the ART Service class that will be
@@ -68,27 +72,9 @@
"com.android.art",
"com.android.art.debug",
],
- sdk_version: "core_platform",
+ sdk_version: "system_server_current",
min_sdk_version: "31",
- public: {
- // Override the setting of "module_current" from the defaults as that is
- // not available in all manifests where this needs to be built.
- sdk_version: "core_current",
- },
-
- system_server: {
- // Override the setting of "module_current" from the defaults as that is
- // not available in all manifests where this needs to be built.
- sdk_version: "core_current",
- },
-
- // The API elements are the ones annotated with
- // libcore.api.CorePlatformApi(status=libcore.api.CorePlatformApi.Status.STABLE)
- droiddoc_options: [
- "--show-single-annotation libcore.api.CorePlatformApi\\(status=libcore.api.CorePlatformApi.Status.STABLE\\)",
- ],
-
// Temporarily disable compatibility with previous released APIs.
// TODO - remove once prototype has stabilized
// running "m update-api" will give instructions on what to do next
@@ -100,11 +86,14 @@
compile_dex: true,
srcs: [
- "service/java/com/android/server/art/ArtManagerLocal.java",
+ "java/**/*.java",
+ ],
+
+ required: [
+ "libartservice",
],
libs: [
- "art.module.api.annotations.for.system.modules",
],
plugins: ["java_api_finder"],
@@ -113,12 +102,9 @@
art_cc_defaults {
name: "art_libartservice_tests_defaults",
+ defaults: ["libartservice_defaults"],
srcs: [
- "service/native/service_test.cc",
- ],
- shared_libs: [
- "libbase",
- "libartservice",
+ "native/service_test.cc",
],
}
@@ -141,3 +127,25 @@
"art_libartservice_tests_defaults",
],
}
+
+android_test {
+ name: "ArtServiceTests",
+
+ // Include all test java files.
+ srcs: [
+ "javatests/**/*.java",
+ ],
+
+ static_libs: [
+ "androidx.test.ext.junit",
+ "androidx.test.ext.truth",
+ "androidx.test.runner",
+ "mockito-target-minus-junit4",
+ "service-art.impl",
+ ],
+
+ sdk_version: "system_server_current",
+ min_sdk_version: "31",
+
+ test_suites: ["general-tests"],
+}
diff --git a/libartservice/service/AndroidManifest.xml b/libartservice/service/AndroidManifest.xml
new file mode 100644
index 0000000..921bde9
--- /dev/null
+++ b/libartservice/service/AndroidManifest.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ ~ Copyright (C) 2022 The Android Open Source Project
+ ~
+ ~ Licensed under the Apache License, Version 2.0 (the "License");
+ ~ you may not use this file except in compliance with the License.
+ ~ You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<manifest
+ xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.server.art.tests">
+
+ <application android:label="ArtServiceTests">
+ <uses-library android:name="android.test.runner" />
+ </application>
+
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.server.art.tests"
+                     android:label="Tests for ART Services" />
+</manifest>
diff --git a/libartservice/api/removed.txt b/libartservice/service/api/current.txt
similarity index 100%
copy from libartservice/api/removed.txt
copy to libartservice/service/api/current.txt
diff --git a/libartservice/api/removed.txt b/libartservice/service/api/removed.txt
similarity index 100%
rename from libartservice/api/removed.txt
rename to libartservice/service/api/removed.txt
diff --git a/libartservice/api/system-server-current.txt b/libartservice/service/api/system-server-current.txt
similarity index 100%
rename from libartservice/api/system-server-current.txt
rename to libartservice/service/api/system-server-current.txt
diff --git a/libartservice/api/system-server-removed.txt b/libartservice/service/api/system-server-removed.txt
similarity index 100%
rename from libartservice/api/system-server-removed.txt
rename to libartservice/service/api/system-server-removed.txt
diff --git a/libartservice/service/java/com/android/server/art/ArtManagerLocal.java b/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
index aac4b25..64aec7b 100644
--- a/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
+++ b/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
@@ -16,15 +16,16 @@
package com.android.server.art;
-import libcore.api.CorePlatformApi;
+import android.annotation.SystemApi;
/**
- * This class provides a system API for functionality provided by the ART
- * module.
+ * This class provides a system API for functionality provided by the ART module.
+ *
+ * @hide
*/
-@libcore.api.CorePlatformApi(status = libcore.api.CorePlatformApi.Status.STABLE)
+@SystemApi(client = SystemApi.Client.SYSTEM_SERVER)
public final class ArtManagerLocal {
- static final String LOG_TAG = "ArtService";
+ private static final String TAG = "ArtService";
public ArtManagerLocal() {}
}
diff --git a/libartservice/tests/src/com/android/server/art/ArtManagerLocalTests.java b/libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java
similarity index 68%
rename from libartservice/tests/src/com/android/server/art/ArtManagerLocalTests.java
rename to libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java
index 515849b..a27dfa5 100644
--- a/libartservice/tests/src/com/android/server/art/ArtManagerLocalTests.java
+++ b/libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java
@@ -16,22 +16,29 @@
package com.android.server.art;
-import static org.junit.Assert.assertTrue;
+import static com.google.common.truth.Truth.assertThat;
-import androidx.test.runner.AndroidJUnit4;
+import androidx.test.filters.SmallTest;
import com.android.server.art.ArtManagerLocal;
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
-public class ArtManagerLocalTests extends TestCase {
+@SmallTest
+@RunWith(MockitoJUnitRunner.class)
+public class ArtManagerLocalTest {
private ArtManagerLocal mArtManagerLocal;
- public void setup() {
+ @Before
+ public void setUp() {
mArtManagerLocal = new ArtManagerLocal();
}
+ @Test
public void testScaffolding() {
- assertTrue(true);
+ assertThat(true).isTrue();
}
-}
\ No newline at end of file
+}
diff --git a/libartservice/tests/Android.bp b/libartservice/tests/Android.bp
deleted file mode 100644
index dc110a1..0000000
--- a/libartservice/tests/Android.bp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright (C) 2021 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-//########################################################################
-// Build ArtServiceTests package
-//########################################################################
-
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "art_license"
- // to get the below license kinds:
- // SPDX-license-identifier-Apache-2.0
- default_applicable_licenses: ["art_license"],
-}
-
-java_test {
- name: "ArtServiceTests",
-
- // Include all test java files.
- srcs: [
- "src/**/*.java",
- ],
-
- static_libs: [
- "androidx.test.runner",
- "service-art.impl",
- ],
-
- test_suites: ["general-tests"],
-}
diff --git a/oatdump/art_standalone_oatdump_tests.xml b/oatdump/art_standalone_oatdump_tests.xml
index bcd94ed..664ac6c 100644
--- a/oatdump/art_standalone_oatdump_tests.xml
+++ b/oatdump/art_standalone_oatdump_tests.xml
@@ -54,7 +54,8 @@
<!-- ART Mainline Module (external (AOSP) version). -->
<option name="mainline-module-package-name" value="com.android.art" />
</object>
-
+ <!-- Skip on HWASan. TODO(b/230394041): Re-enable -->
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.SkipHWASanModuleController" />
<!-- Only run tests if the device under test is SDK version 31 (Android 12) or above. -->
<object type="module_controller" class="com.android.tradefed.testtype.suite.module.Sdk31ModuleController" />
</configuration>
diff --git a/odrefresh/Android.bp b/odrefresh/Android.bp
index 8fee83a..55a1fde 100644
--- a/odrefresh/Android.bp
+++ b/odrefresh/Android.bp
@@ -47,7 +47,6 @@
],
static_libs: [
"libc++fs",
- "libtinyxml2",
],
tidy: true,
tidy_disabled_srcs: [":art-apex-cache-info"],
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index a6425af..58601aa 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -741,7 +741,6 @@
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
void MethodUnwind(art::Thread* self,
- art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED)
REQUIRES_SHARED(art::Locks::mutator_lock_) override {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 9e0e78e..98ddbb7 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -415,7 +415,6 @@
],
static_libs: [
"libstatslog_art",
- "libtinyxml2",
],
generated_sources: [
"apex-info-list-tinyxml",
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index 23d82ba..68afc24 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -315,10 +315,6 @@
DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
- RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r1
-.endm
-
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
ldr ip, [rSELF, #THREAD_EXCEPTION_OFFSET] @ Get exception field.
cmp ip, #0
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index aff055e..5e3f854 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -25,6 +25,8 @@
#define FRAME_SIZE_SAVE_EVERYTHING 192
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_R0_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
// The offset from the art_quick_read_barrier_mark_introspection (used for field
// loads with 32-bit LDR) to the entrypoint for field loads with 16-bit LDR,
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index fc57df7..7270d20 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -99,7 +99,7 @@
// Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
// for @FastNative or @CriticalNative.
ldr ip, [r0, #THREAD_TOP_QUICK_FRAME_OFFSET] // uintptr_t tagged_quick_frame
- bic ip, #1 // ArtMethod** sp
+ bic ip, #TAGGED_JNI_SP_MASK // ArtMethod** sp
ldr ip, [ip] // ArtMethod* method
ldr ip, [ip, #ART_METHOD_ACCESS_FLAGS_OFFSET] // uint32_t access_flags
tst ip, #(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE)
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index d6f129b..f3e53c1 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -134,16 +134,50 @@
.cfi_adjust_cfa_offset -52
.endm
-.macro RETURN_IF_RESULT_IS_ZERO
- cbnz r0, 1f @ result non-zero branch over
- bx lr @ return
+.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ ldr r1, [rSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
+ cbnz r1, 1f
+ DEOPT_OR_RETURN r1 // Check if deopt is required
1:
+ DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO
- cbz r0, 1f @ result zero branch over
- bx lr @ return
-1:
+.macro DEOPT_OR_RETURN temp, is_ref = 0
+ ldr \temp, [rSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ bx lr
+2:
+ SETUP_SAVE_EVERYTHING_FRAME \temp
+ mov r2, \is_ref // pass if result is a reference
+ mov r1, r0 // pass the result
+ mov r0, rSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ bx lr
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+.endm
+
+.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 temp, is_ref
+ ldr \temp, [rSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
+ REFRESH_MARKING_REGISTER
+ bx lr
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ str r0, [sp, SAVE_EVERYTHING_FRAME_R0_OFFSET] // update result in the frame
+ mov r2, \is_ref // pass if result is a reference
+ mov r1, r0 // pass the result
+ mov r0, rSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ bx lr
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
.endm
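
In rough C++ terms, the new DEOPT_OR_RETURN / DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 macros implement the following shape (the flag accessor and the artDeoptimizeIfNeeded signature are inferred from the register assignments above, not taken from the real headers):

#include <cstdint>

// Assumed shape of the runtime entry point: r0/x0 = Thread*, r1/x1 = result, r2/x2 = is_ref.
extern "C" void artDeoptimizeIfNeeded(void* self, uintptr_t result, bool is_ref);

struct ThreadSketch {
  bool deopt_check_required;  // stands in for THREAD_DEOPT_CHECK_REQUIRED_OFFSET
};

uintptr_t ReturnOrDeopt(ThreadSketch* self, uintptr_t result, bool is_ref) {
  if (!self->deopt_check_required) {
    return result;  // Fast path: nothing pending, plain return.
  }
  // Slow path: the assembly first builds a SAVE_EVERYTHING frame so the runtime
  // can inspect and, if needed, deoptimize the caller, then returns the result.
  artDeoptimizeIfNeeded(self, result, is_ref);
  return result;
}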
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
@@ -183,12 +217,16 @@
.endm
.macro RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
- RETURN_IF_RESULT_IS_ZERO
+ cbnz r0, 1f @ result non-zero branch over
+ DEOPT_OR_RETURN r1
+1:
DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- RETURN_IF_RESULT_IS_NON_ZERO
+.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+ cbz r0, 1f @ result zero branch over
+ DEOPT_OR_RETURN r1, /*is_ref=*/1
+1:
DELIVER_PENDING_EXCEPTION
.endm
@@ -517,8 +555,7 @@
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline
/*
@@ -548,8 +585,7 @@
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline
/*
@@ -601,13 +637,33 @@
.cfi_rel_offset \rReg, \offset
.endm
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-.macro READ_BARRIER rDest, rObj, offset
+ // Helper macros for `art_quick_aput_obj`.
#ifdef USE_READ_BARRIER
+#ifdef USE_BAKER_READ_BARRIER
+.macro BAKER_RB_CHECK_GRAY_BIT_AND_LOAD rDest, rObj, offset, gray_slow_path_label
+ ldr ip, [\rObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tst ip, #LOCK_WORD_READ_BARRIER_STATE_MASK_SHIFTED
+ bne \gray_slow_path_label
+ // False dependency to avoid needing load/load fence.
+ add \rObj, \rObj, ip, lsr #32
+ ldr \rDest, [\rObj, #\offset]
+ UNPOISON_HEAP_REF \rDest
+.endm
+
+.macro BAKER_RB_LOAD_AND_MARK rDest, rObj, offset, mark_function
+ ldr \rDest, [\rObj, #\offset]
+ UNPOISON_HEAP_REF \rDest
+ str lr, [sp, #-8]! @ Save LR with correct stack alignment.
+ .cfi_rel_offset lr, 0
+ .cfi_adjust_cfa_offset 8
+ bl \mark_function
+ ldr lr, [sp], #8 @ Restore LR.
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
+.endm
+#else // USE_BAKER_READ_BARRIER
+ .extern artReadBarrierSlow
+.macro READ_BARRIER_SLOW rDest, rObj, offset
push {r0-r3, ip, lr} @ 6 words for saved registers (used in art_quick_aput_obj)
.cfi_adjust_cfa_offset 24
.cfi_rel_offset r0, 0
@@ -640,30 +696,36 @@
pop {lr} @ restore lr
.cfi_adjust_cfa_offset -4
.cfi_restore lr
-#else
- ldr \rDest, [\rObj, #\offset]
- UNPOISON_HEAP_REF \rDest
-#endif // USE_READ_BARRIER
.endm
+#endif // USE_BAKER_READ_BARRIER
+#endif // USE_READ_BARRIER
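
The Baker read-barrier helpers above boil down to the following logic (a sketch with illustrative parameter names; the real fast path additionally orders the two loads through a false address dependency on the lock word instead of a load/load fence):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch of BAKER_RB_CHECK_GRAY_BIT_AND_LOAD / BAKER_RB_LOAD_AND_MARK: if the
// object's lock word has the read-barrier state ("gray") bit set, the loaded
// reference must be passed to the mark routine; otherwise a plain load suffices.
uint32_t LoadFieldWithBakerBarrier(const uint8_t* obj,
                                   size_t lock_word_offset,
                                   size_t field_offset,
                                   uint32_t gray_bit_mask,
                                   uint32_t (*mark)(uint32_t ref)) {
  uint32_t lock_word;
  std::memcpy(&lock_word, obj + lock_word_offset, sizeof(lock_word));
  uint32_t ref;
  std::memcpy(&ref, obj + field_offset, sizeof(ref));
  if ((lock_word & gray_bit_mask) != 0) {
    ref = mark(ref);  // Slow path: let the GC mark/forward the referent first.
  }
  return ref;
}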
-#ifdef USE_READ_BARRIER
- .extern artReadBarrierSlow
-#endif
.hidden art_quick_aput_obj
ENTRY art_quick_aput_obj
-#ifdef USE_READ_BARRIER
- @ The offset to .Ldo_aput_null is too large to use cbz due to expansion from READ_BARRIER macro.
+#if defined(USE_READ_BARRIER) && !defined(USE_BAKER_READ_BARRIER)
+ @ The offset to .Ldo_aput_null is too large to use cbz due to expansion from `READ_BARRIER_SLOW`.
tst r2, r2
- beq .Ldo_aput_null
-#else
- cbz r2, .Ldo_aput_null
+ beq .Laput_obj_null
+ READ_BARRIER_SLOW r3, r0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER_SLOW r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
+ READ_BARRIER_SLOW r4, r2, MIRROR_OBJECT_CLASS_OFFSET
+#else // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
+ cbz r2, .Laput_obj_null
+#ifdef USE_READ_BARRIER
+ cmp rMR, #0
+ bne .Laput_obj_gc_marking
#endif // USE_READ_BARRIER
- READ_BARRIER r3, r0, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER ip, r2, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
- cmp r3, ip @ value's type == array's component type - trivial assignability
- bne .Lcheck_assignability
-.Ldo_aput:
+ ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET]
+ UNPOISON_HEAP_REF r3
+ // R4 is a scratch register in managed ARM ABI.
+ ldr r4, [r2, #MIRROR_OBJECT_CLASS_OFFSET]
+ UNPOISON_HEAP_REF r4
+ ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
+ UNPOISON_HEAP_REF r3
+#endif // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
+ cmp r3, r4 @ value's type == array's component type - trivial assignability
+ bne .Laput_obj_check_assignability
+.Laput_obj_store:
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
@@ -671,26 +733,22 @@
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
-.Ldo_aput_null:
+
+.Laput_obj_null:
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
str r2, [r3, r1, lsl #2]
blx lr
-.Lcheck_assignability:
+
+.Laput_obj_check_assignability:
push {r0-r2, lr} @ save arguments
.cfi_adjust_cfa_offset 16
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r1, 4
- .cfi_rel_offset r2, 8
.cfi_rel_offset lr, 12
- mov r1, ip
+ mov r1, r4
mov r0, r3
bl artIsAssignableFromCode
cbz r0, .Lthrow_array_store_exception
.cfi_remember_state
pop {r0-r2, lr}
- .cfi_restore r0
- .cfi_restore r1
- .cfi_restore r2
.cfi_restore lr
.cfi_adjust_cfa_offset -16
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
@@ -700,19 +758,52 @@
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
+
.Lthrow_array_store_exception:
CFI_RESTORE_STATE_AND_DEF_CFA sp, 16
pop {r0-r2, lr}
- .cfi_restore r0
- .cfi_restore r1
- .cfi_restore r2
.cfi_restore lr
.cfi_adjust_cfa_offset -16
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ .cfi_remember_state
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
mov r1, r2
- mov r2, rSELF @ pass Thread::Current
+ mov r2, rSELF @ Pass Thread::Current.
bl artThrowArrayStoreException @ (Class*, Class*, Thread*)
- bkpt @ unreached
+ bkpt @ Unreachable.
+
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
+.Laput_obj_gc_marking:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ r3, r0, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_array_class
+.Laput_obj_mark_array_class_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, .Laput_obj_mark_array_element
+.Laput_obj_mark_array_element_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ r4, r2, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_object_class
+.Laput_obj_mark_object_class_continue:
+
+ cmp r3, r4 @ value's type == array's component type - trivial assignability
+    // All registers are set up correctly for `.Laput_obj_check_assignability`.
+ bne .Laput_obj_check_assignability
+ b .Laput_obj_store
+
+.Laput_obj_mark_array_class:
+ BAKER_RB_LOAD_AND_MARK r3, r0, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_class_continue
+
+.Laput_obj_mark_array_element:
+ BAKER_RB_LOAD_AND_MARK \
+ r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_element_continue
+
+.Laput_obj_mark_object_class:
+ BAKER_RB_LOAD_AND_MARK r4, r2, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg04
+ b .Laput_obj_mark_object_class_continue
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
END art_quick_aput_obj
// Macro to facilitate adding new allocation entrypoints.
@@ -782,11 +873,8 @@
mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t index, Thread*)
cbz r0, 1f @ If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
- REFRESH_MARKING_REGISTER
- bx lr
- CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ str r0, [sp, #136] @ store result in the frame
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 r1, /* is_ref= */ 1
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END \name
@@ -809,12 +897,12 @@
/*
* Called by managed code to resolve a static field and load a non-wide value.
*/
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
/*
* Called by managed code to resolve a static field and load a 64-bit primitive value.
*/
@@ -827,7 +915,7 @@
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
- bx lr @ return on success
+ DEOPT_OR_RETURN r2 @ check if deopt is required or return
1:
DELIVER_PENDING_EXCEPTION
END art_quick_get64_static
@@ -835,12 +923,12 @@
/*
* Called by managed code to resolve an instance field and load a non-wide value.
*/
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
/*
* Called by managed code to resolve an instance field and load a 64-bit primitive value.
*/
@@ -853,7 +941,7 @@
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
- bx lr @ return on success
+ DEOPT_OR_RETURN r2 @ check if deopt is required or return
1:
DELIVER_PENDING_EXCEPTION
END art_quick_get64_instance
@@ -888,8 +976,7 @@
.cfi_adjust_cfa_offset -16
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_set64_instance
.extern artSet64StaticFromCompiledCode
@@ -903,8 +990,7 @@
.cfi_adjust_cfa_offset -16
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_set64_static
// Generate the allocation entrypoints for each allocator.
@@ -1037,7 +1123,7 @@
bl \cxx_name @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \c_name
.endm
@@ -1122,7 +1208,7 @@
bl \entrypoint // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -1195,7 +1281,7 @@
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -1883,7 +1969,7 @@
     bl artStringBuilderAppend @ (uint32_t, const uint32_t*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END art_quick_string_builder_append
/*
@@ -1895,13 +1981,9 @@
*
* If `reg` is different from `r0`, the generated function follows a
* non-standard runtime calling convention:
- * - register `reg` is used to pass the (sole) argument of this
- * function (instead of R0);
- * - register `reg` is used to return the result of this function
- * (instead of R0);
- * - R0 is treated like a normal (non-argument) caller-save register;
- * - everything else is the same as in the standard runtime calling
- * convention (e.g. standard callee-save registers are preserved).
+ * - register `reg` (which may be different from R0) is used to pass the (sole) argument,
+ * - register `reg` (which may be different from R0) is used to return the result,
+ * - all other registers are callee-save (the values they hold are preserved).
*/
.macro READ_BARRIER_MARK_REG name, reg
ENTRY \name
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 887ee02..5a285a4 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -19,6 +19,7 @@
#include "asm_support.h"
+// TODO(mythria): Change these to use constants from callee_save_frame_arm64.h
#define CALLEE_SAVES_SIZE (12 * 8 + 8 * 8)
// +8 for the ArtMethod, +8 for alignment.
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES (CALLEE_SAVES_SIZE + 16)
@@ -27,6 +28,8 @@
#define FRAME_SIZE_SAVE_EVERYTHING 512
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_X0_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
// The offset from art_quick_read_barrier_mark_introspection to the array switch cases,
// i.e. art_quick_read_barrier_mark_introspection_arrays.
diff --git a/runtime/arch/arm64/jni_entrypoints_arm64.S b/runtime/arch/arm64/jni_entrypoints_arm64.S
index 463767c..b3ea40d 100644
--- a/runtime/arch/arm64/jni_entrypoints_arm64.S
+++ b/runtime/arch/arm64/jni_entrypoints_arm64.S
@@ -103,7 +103,7 @@
// Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
// for @FastNative or @CriticalNative.
ldr xIP0, [x0, #THREAD_TOP_QUICK_FRAME_OFFSET] // uintptr_t tagged_quick_frame
- bic xIP0, xIP0, #1 // ArtMethod** sp
+ bic xIP0, xIP0, #TAGGED_JNI_SP_MASK // ArtMethod** sp
ldr xIP0, [xIP0] // ArtMethod* method
ldr xIP0, [xIP0, #ART_METHOD_ACCESS_FLAGS_OFFSET] // uint32_t access_flags
mov xIP1, #(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE)
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index d8c91e1..134c327 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -194,26 +194,56 @@
RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
.endm
-.macro RETURN_IF_RESULT_IS_ZERO
- cbnz x0, 1f // result non-zero branch over
- ret // return
-1:
+.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ ldr x1, [xSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
+ cbnz x1, 1f
+ DEOPT_OR_RETURN x1 // Check if deopt is required
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO
- cbz x0, 1f // result zero branch over
- ret // return
-1:
+.macro DEOPT_OR_RETURN temp, is_ref = 0
+ ldr \temp, [xSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ ret
+2:
+ SETUP_SAVE_EVERYTHING_FRAME
+ mov x2, \is_ref // pass if result is a reference
+ mov x1, x0 // pass the result
+ mov x0, xSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
.endm
-// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
-.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
- RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
+.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_X0 temp, is_ref
+ ldr \temp, [xSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
+ REFRESH_MARKING_REGISTER
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ str x0, [sp, #SAVE_EVERYTHING_FRAME_X0_OFFSET] // update result in the frame
+ mov x2, \is_ref // pass if result is a reference
+ mov x1, x0 // pass the result
+ mov x0, xSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
.endm
+
.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
cbnz w0, 1f // result non-zero branch over
- ret // return
+ DEOPT_OR_RETURN x1
1:
DELIVER_PENDING_EXCEPTION
.endm
@@ -999,25 +1029,30 @@
.cfi_restore \xReg2
.endm
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * xDest, wDest and xObj are registers, offset is a defined literal such as
- * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
- * name mismatch between instructions. This macro uses the lower 32b of register when possible.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-.macro READ_BARRIER xDest, wDest, xObj, xTemp, wTemp, offset, number
+ // Helper macros for `art_quick_aput_obj`.
#ifdef USE_READ_BARRIER
-# ifdef USE_BAKER_READ_BARRIER
- ldr \wTemp, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- tbnz \wTemp, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, .Lrb_slowpath\number
+#ifdef USE_BAKER_READ_BARRIER
+.macro BAKER_RB_CHECK_GRAY_BIT_AND_LOAD wDest, xObj, offset, gray_slow_path_label
+ ldr wIP0, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tbnz wIP0, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, \gray_slow_path_label
// False dependency to avoid needing load/load fence.
- add \xObj, \xObj, \xTemp, lsr #32
- ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
+ add \xObj, \xObj, xIP0, lsr #32
+ ldr \wDest, [\xObj, #\offset] // Heap reference = 32b; zero-extends to xN.
UNPOISON_HEAP_REF \wDest
- b .Lrb_exit\number
-# endif // USE_BAKER_READ_BARRIER
-.Lrb_slowpath\number:
+.endm
+
+.macro BAKER_RB_LOAD_AND_MARK wDest, xObj, offset, mark_function
+ ldr \wDest, [\xObj, #\offset] // Heap reference = 32b; zero-extends to xN.
+ UNPOISON_HEAP_REF \wDest
+ // Save LR in a register preserved by `art_quick_read_barrier_mark_regNN`
+    // and unused by `art_quick_aput_obj`.
+ mov x5, lr
+ bl \mark_function
+ mov lr, x5 // Restore LR.
+.endm
+#else // USE_BAKER_READ_BARRIER
+ .extern artReadBarrierSlow
+.macro READ_BARRIER_SLOW xDest, wDest, xObj, offset
// Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 48
SAVE_TWO_REGS x2, x3, 16
@@ -1042,41 +1077,44 @@
POP_REG_NE x4, 32, \xDest
RESTORE_REG xLR, 40
DECREASE_FRAME 48
-.Lrb_exit\number:
-#else
- ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
- UNPOISON_HEAP_REF \wDest
-#endif // USE_READ_BARRIER
.endm
+#endif // USE_BAKER_READ_BARRIER
+#endif // USE_READ_BARRIER
-#ifdef USE_READ_BARRIER
- .extern artReadBarrierSlow
-#endif
ENTRY art_quick_aput_obj
- cbz x2, .Ldo_aput_null
- READ_BARRIER x3, w3, x0, x3, w3, MIRROR_OBJECT_CLASS_OFFSET, 0 // Heap reference = 32b
- // This also zero-extends to x3
- READ_BARRIER x3, w3, x3, x4, w4, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 1 // Heap reference = 32b
- // This also zero-extends to x3
- READ_BARRIER x4, w4, x2, x4, w4, MIRROR_OBJECT_CLASS_OFFSET, 2 // Heap reference = 32b
- // This also zero-extends to x4
+ cbz x2, .Laput_obj_null
+#if defined(USE_READ_BARRIER) && !defined(USE_BAKER_READ_BARRIER)
+ READ_BARRIER_SLOW x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER_SLOW x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
+ READ_BARRIER_SLOW x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET
+#else // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
+#ifdef USE_READ_BARRIER
+ cbnz wMR, .Laput_obj_gc_marking
+#endif // USE_READ_BARRIER
+ ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b; zero-extends to x3.
+ UNPOISON_HEAP_REF w3
+ ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b; zero-extends to x3.
+ UNPOISON_HEAP_REF w3
+ ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b; zero-extends to x4.
+ UNPOISON_HEAP_REF w4
+#endif // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
cmp w3, w4 // value's type == array's component type - trivial assignability
- bne .Lcheck_assignability
-.Ldo_aput:
+ bne .Laput_obj_check_assignability
+.Laput_obj_store:
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
- // "Compress" = do nothing
POISON_HEAP_REF w2
- str w2, [x3, x1, lsl #2] // Heap reference = 32b
+ str w2, [x3, x1, lsl #2] // Heap reference = 32b.
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #CARD_TABLE_CARD_SHIFT
strb w3, [x3, x0]
ret
-.Ldo_aput_null:
+
+.Laput_obj_null:
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
- // "Compress" = do nothing
- str w2, [x3, x1, lsl #2] // Heap reference = 32b
+ str w2, [x3, x1, lsl #2] // Heap reference = 32b.
ret
-.Lcheck_assignability:
+
+.Laput_obj_check_assignability:
// Store arguments and link register
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
SAVE_TWO_REGS x2, xLR, 16
@@ -1087,7 +1125,7 @@
bl artIsAssignableFromCode
// Check for exception
- cbz x0, .Lthrow_array_store_exception
+ cbz x0, .Laput_obj_throw_array_store_exception
// Restore
.cfi_remember_state
@@ -1095,23 +1133,56 @@
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
- // "Compress" = do nothing
POISON_HEAP_REF w2
- str w2, [x3, x1, lsl #2] // Heap reference = 32b
+ str w2, [x3, x1, lsl #2] // Heap reference = 32b.
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #CARD_TABLE_CARD_SHIFT
strb w3, [x3, x0]
ret
CFI_RESTORE_STATE_AND_DEF_CFA sp, 32
-.Lthrow_array_store_exception:
+
+.Laput_obj_throw_array_store_exception:
RESTORE_TWO_REGS x2, xLR, 16
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ .cfi_remember_state
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
mov x1, x2 // Pass value.
mov x2, xSELF // Pass Thread::Current.
bl artThrowArrayStoreException // (Object*, Object*, Thread*).
- brk 0 // Unreached.
+ brk 0 // Unreachable.
+
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
+.Laput_obj_gc_marking:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ w3, x0, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_array_class
+.Laput_obj_mark_array_class_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, .Laput_obj_mark_array_element
+.Laput_obj_mark_array_element_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ w4, x2, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_object_class
+.Laput_obj_mark_object_class_continue:
+ cmp w3, w4 // value's type == array's component type - trivial assignability
+ bne .Laput_obj_check_assignability
+ b .Laput_obj_store
+
+.Laput_obj_mark_array_class:
+ BAKER_RB_LOAD_AND_MARK w3, x0, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_class_continue
+
+.Laput_obj_mark_array_element:
+ BAKER_RB_LOAD_AND_MARK \
+ w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_element_continue
+
+.Laput_obj_mark_object_class:
+ BAKER_RB_LOAD_AND_MARK w4, x2, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg04
+ b .Laput_obj_mark_object_class_continue
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
END art_quick_aput_obj
// Macro to facilitate adding new allocation entrypoints.
@@ -1214,11 +1285,7 @@
mov x1, xSELF // pass Thread::Current
bl \entrypoint // (int32_t index, Thread* self)
cbz w0, 1f // If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
- REFRESH_MARKING_REGISTER
- ret // return
- CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_X0 x1, /* is_ref= */ 1
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END \name
@@ -1228,13 +1295,14 @@
ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- cbz w0, 1f // result zero branch over
- ret // return
+.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+ cbz w0, 1f // result zero branch over
+ DEOPT_OR_RETURN x1, /*is_ref=*/1 // check for deopt or return
1:
DELIVER_PENDING_EXCEPTION
.endm
+
/*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
* failure.
@@ -1256,21 +1324,21 @@
// Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
// defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
@@ -1410,7 +1478,7 @@
bl \cxx_name
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \c_name
.endm
@@ -1465,7 +1533,7 @@
bl \entrypoint // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -1539,7 +1607,7 @@
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -2102,7 +2170,7 @@
     bl artStringBuilderAppend // (uint32_t, const uint32_t*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END art_quick_string_builder_append
/*
@@ -2111,15 +2179,10 @@
* `wreg` (corresponding to X register `xreg`), saving and restoring
* all caller-save registers.
*
- * If `wreg` is different from `w0`, the generated function follows a
- * non-standard runtime calling convention:
- * - register `wreg` is used to pass the (sole) argument of this
- * function (instead of W0);
- * - register `wreg` is used to return the result of this function
- * (instead of W0);
- * - W0 is treated like a normal (non-argument) caller-save register;
- * - everything else is the same as in the standard runtime calling
- * convention (e.g. standard callee-save registers are preserved).
+ * The generated function follows a non-standard runtime calling convention:
+ * - register `wreg` (which may be different from W0) is used to pass the (sole) argument,
+ * - register `wreg` (which may be different from W0) is used to return the result,
+ * - all other registers are callee-save (the values they hold are preserved).
*/
.macro READ_BARRIER_MARK_REG name, wreg, xreg
ENTRY \name
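
All of the `*_OR_DEOPT_*` return macros introduced in this change (here and in the x86/x86-64 files below) implement the same decision: deliver the pending exception when the runtime call failed, otherwise check the thread-local deopt flag and call artDeoptimizeIfNeeded before returning to managed code. The rough C++ sketch below only illustrates that shape; Thread, DeoptCheckRequired() and DeliverPendingException() are stand-ins for the THREAD_DEOPT_CHECK_REQUIRED_OFFSET test and the DELIVER_PENDING_EXCEPTION macro (only artDeoptimizeIfNeeded is real, and its declaration appears in the quick_thread_entrypoints.cc hunk further down).

#include <cstdint>

// Sketch only: the real logic is the per-architecture assembly in this diff.
// `Thread`, `DeoptCheckRequired()` and `DeliverPendingException()` are stand-ins,
// not ART declarations; `artDeoptimizeIfNeeded` matches the new C++ entrypoint.
struct Thread {
  bool deopt_check_required = false;
  bool DeoptCheckRequired() const { return deopt_check_required; }
};

[[noreturn]] void DeliverPendingException(Thread* self);  // stand-in for DELIVER_PENDING_EXCEPTION
extern "C" void artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref);

// Shape of RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER after this change.
uintptr_t ReturnIfNonZeroOrDeoptOrDeliver(Thread* self, uintptr_t result, bool is_ref) {
  if (result == 0) {
    DeliverPendingException(self);                // the runtime call raised an exception
  }
  if (self->DeoptCheckRequired()) {               // new: instrumentation wants a deopt check
    artDeoptimizeIfNeeded(self, result, is_ref);  // may deoptimize the caller's frame
  }
  return result;                                  // fast path: return the result to managed code
}
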
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 32888ed..5d4b24b 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -16,27 +16,27 @@
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
// Called by managed code to allocate an object of a resolved class.
-ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate an object of an initialized class.
-ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate an object when the caller doesn't know whether it has access
// to the created type.
-ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string if it could not be removed by any optimizations
-ONE_ARG_DOWNCALL art_quick_alloc_string_object\c_suffix, artAllocStringObject\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_string_object\c_suffix, artAllocStringObject\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate an array of a resolve class.
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string from bytes
-FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes\c_suffix, artAllocStringFromBytesFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes\c_suffix, artAllocStringFromBytesFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string from chars
-THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars\c_suffix, artAllocStringFromCharsFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars\c_suffix, artAllocStringFromCharsFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string from string
-ONE_ARG_DOWNCALL art_quick_alloc_string_from_string\c_suffix, artAllocStringFromStringFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_string_from_string\c_suffix, artAllocStringFromStringFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
.endm
.macro GENERATE_ALL_ALLOC_ENTRYPOINTS
@@ -58,29 +58,29 @@
// GENERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in
// hand-written assembly.
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_string_object ## c_suffix, artAllocStringObject ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_string_object ## c_suffix, artAllocStringObject ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
- FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index 737d736..f688933 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -25,5 +25,7 @@
#define FRAME_SIZE_SAVE_EVERYTHING (48 + 64)
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_EAX_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index d827509..4b43814 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -98,7 +98,7 @@
// for @FastNative or @CriticalNative.
movl (%esp), %eax // Thread* self
movl THREAD_TOP_QUICK_FRAME_OFFSET(%eax), %eax // uintptr_t tagged_quick_frame
- andl LITERAL(0xfffffffe), %eax // ArtMethod** sp
+ andl LITERAL(TAGGED_JNI_SP_MASK_TOGGLED32), %eax // ArtMethod** sp
movl (%eax), %eax // ArtMethod* method
testl LITERAL(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE), \
ART_METHOD_ACCESS_FLAGS_OFFSET(%eax)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 7f1311c..1a49fff 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -794,12 +794,9 @@
call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- testl %eax, %eax // If result is null, deliver the OOME.
+ testl %eax, %eax // If result is null, deliver pending exception
jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX // restore frame up to return address
- ret // return
- CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_EAX ebx, /* is_ref= */1 // Check for deopt
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION VAR(c_name)
@@ -809,18 +806,72 @@
ONE_ARG_SAVE_EVERYTHING_DOWNCALL \c_name, \cxx_name, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
END_MACRO
-MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
- testl %eax, %eax // eax == 0 ?
- jz 1f // if eax == 0 goto 1
- ret // return
-1: // deliver exception on current thread
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER)
+ testl %eax, %eax // eax == 0 ?
+ jz 1f // if eax == 0 goto 1
+ DEOPT_OR_RETURN ebx, /*is_ref=*/1 // check if deopt is required
+1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
+MACRO0(RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION)
+ cmpl MACRO_LITERAL(0), %fs:THREAD_EXCEPTION_OFFSET // exception field == 0 ?
+ jne 1f // if exception field != 0 goto 1
+ DEOPT_OR_RETURN ebx // check if deopt is required
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO2(DEOPT_OR_RETURN, temp, is_ref = 0)
+ cmpl LITERAL(0), %fs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ ret
+2:
+ SETUP_SAVE_EVERYTHING_FRAME \temp
+ subl MACRO_LITERAL(4), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(4)
+ pushl MACRO_LITERAL(\is_ref) // is_ref
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH_ARG eax // result
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artDeoptimizeIfNeeded)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+MACRO2(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_EAX, temp, is_ref = 0)
+ cmpl LITERAL(0), %fs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ movl %eax, SAVE_EVERYTHING_FRAME_EAX_OFFSET(%esp) // update eax in the frame
+ subl MACRO_LITERAL(4), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(4)
+ pushl MACRO_LITERAL(\is_ref) // is_ref
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH_ARG eax // result
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artDeoptimizeIfNeeded)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+
MACRO0(RETURN_IF_EAX_ZERO)
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
- ret // return
+ DEOPT_OR_RETURN ebx // check if deopt is needed
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -927,7 +978,7 @@
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
@@ -974,7 +1025,7 @@
addl LITERAL(16), %esp
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_MACRO
MACRO2(ART_QUICK_ALLOC_OBJECT_TLAB, c_name, cxx_name)
@@ -1107,7 +1158,7 @@
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_entrypoint)
END_MACRO
@@ -1254,126 +1305,102 @@
.endif
END_MACRO
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * obj_reg and dest_reg are registers, offset is a defined literal such as
- * MIRROR_OBJECT_CLASS_OFFSET.
- * pop_eax is a boolean flag, indicating if eax is popped after the call.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-MACRO4(READ_BARRIER, obj_reg, offset, dest_reg, pop_eax)
-#ifdef USE_READ_BARRIER
- PUSH eax // save registers used in art_quick_aput_obj
- PUSH ebx
- PUSH edx
- PUSH ecx
- // Outgoing argument set up
- pushl MACRO_LITERAL((RAW_VAR(offset))) // pass offset, double parentheses are necessary
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH RAW_VAR(obj_reg) // pass obj_reg
- PUSH eax // pass ref, just pass eax for now since parameter ref is unused
- call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
- // No need to unpoison return value in eax, artReadBarrierSlow() would do the unpoisoning.
- .ifnc RAW_VAR(dest_reg), eax
- movl %eax, REG_VAR(dest_reg) // save loaded ref in dest_reg
- .endif
- addl MACRO_LITERAL(12), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-12)
- POP_REG_NE ecx, RAW_VAR(dest_reg) // Restore args except dest_reg
- POP_REG_NE edx, RAW_VAR(dest_reg)
- POP_REG_NE ebx, RAW_VAR(dest_reg)
- .ifc RAW_VAR(pop_eax), true
- POP_REG_NE eax, RAW_VAR(dest_reg)
- .endif
-#else
- movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg)
- UNPOISON_HEAP_REF RAW_VAR(dest_reg)
-#endif // USE_READ_BARRIER
-END_MACRO
-
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
- jz .Ldo_aput_null
- READ_BARRIER eax, MIRROR_OBJECT_CLASS_OFFSET, ebx, true
- READ_BARRIER ebx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ebx, true
- // value's type == array's component type - trivial assignability
-#if defined(USE_READ_BARRIER)
- READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false
- cmpl %eax, %ebx
- POP eax // restore eax from the push in the beginning of READ_BARRIER macro
- // This asymmetric push/pop saves a push of eax and maintains stack alignment.
-#elif defined(USE_HEAP_POISONING)
- PUSH eax // save eax
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
- UNPOISON_HEAP_REF eax
- cmpl %eax, %ebx
- POP eax // restore eax
-#else
- cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx
-#endif
- jne .Lcheck_assignability
-.Ldo_aput:
+ jz .Laput_obj_null
+ movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx
+ UNPOISON_HEAP_REF ebx
+#ifdef USE_READ_BARRIER
+ cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
+ jnz .Laput_obj_gc_marking
+#endif // USE_READ_BARRIER
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
+ cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx // Both poisoned if heap poisoning is enabled.
+ jne .Laput_obj_check_assignability
+.Laput_obj_store:
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %eax
movb %dl, (%edx, %eax)
ret
-.Ldo_aput_null:
+
+.Laput_obj_null:
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
ret
-.Lcheck_assignability:
- PUSH eax // save arguments
- PUSH ecx
- PUSH edx
-#if defined(USE_READ_BARRIER)
- subl LITERAL(4), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(4)
- READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, true
- subl LITERAL(4), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass arg2 - type of the value to be stored
-#elif defined(USE_HEAP_POISONING)
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
+
+.Laput_obj_check_assignability:
+ UNPOISON_HEAP_REF ebx // Unpoison array component type if poisoning is enabled.
+ PUSH_ARG eax // Save `art_quick_aput_obj()` arguments.
+ PUSH_ARG ecx
+ PUSH_ARG edx
+ INCREASE_FRAME 8 // Alignment padding.
+ // Pass arg2 - type of the value to be stored.
+#if defined(USE_HEAP_POISONING)
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
- PUSH eax // pass arg2 - type of the value to be stored
+ PUSH_ARG eax
#else
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
+ pushl MIRROR_OBJECT_CLASS_OFFSET(%edx)
CFI_ADJUST_CFA_OFFSET(4)
#endif
- PUSH ebx // pass arg1 - component type of the array
+.Laput_obj_check_assignability_call:
+ PUSH_ARG ebx // Pass arg1 - component type of the array.
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
- addl LITERAL(16), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-16)
+ DECREASE_FRAME 16 // Pop `artIsAssignableFromCode()` arguments
testl %eax, %eax
+ POP_ARG edx // Pop `art_quick_aput_obj()` arguments; flags unaffected.
+ POP_ARG ecx
+ POP_ARG eax
jz .Lthrow_array_store_exception
- POP edx
- POP ecx
- POP eax
POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // do the aput
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // Do the aput.
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %eax
movb %dl, (%edx, %eax)
ret
- CFI_ADJUST_CFA_OFFSET(12) // 3 POP after the jz for unwinding.
+
.Lthrow_array_store_exception:
- POP edx
- POP ecx
- POP eax
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx // save all registers as basis for long jump context
- // Outgoing argument set up
- PUSH eax // alignment padding
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass arg2 - value
- PUSH eax // pass arg1 - array
+#ifdef USE_READ_BARRIER
+ CFI_REMEMBER_STATE
+#endif // USE_READ_BARRIER
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx // Save all registers as basis for long jump context.
+ // Outgoing argument set up.
+ PUSH_ARG eax // Alignment padding.
+ PUSH_ARG fs:THREAD_SELF_OFFSET // Pass Thread::Current()
+ PUSH_ARG edx // Pass arg2 - value.
+ PUSH_ARG eax // Pass arg1 - array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
UNREACHABLE
+
+#ifdef USE_READ_BARRIER
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, 4
+.Laput_obj_gc_marking:
+ PUSH_ARG eax // Save `art_quick_aput_obj()` arguments.
+ PUSH_ARG ecx // We need to align stack for `art_quick_read_barrier_mark_regNN`
+ PUSH_ARG edx // and use a register (EAX) as a temporary for the object class.
+ call SYMBOL(art_quick_read_barrier_mark_reg03) // Mark EBX.
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
+ UNPOISON_HEAP_REF ebx
+ call SYMBOL(art_quick_read_barrier_mark_reg03) // Mark EBX.
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
+ UNPOISON_HEAP_REF eax
+ call SYMBOL(art_quick_read_barrier_mark_reg00) // Mark EAX.
+ cmpl %eax, %ebx
+ jne .Laput_obj_check_assignability_gc_marking
+ POP_ARG edx // Restore `art_quick_aput_obj()` arguments.
+ POP_ARG ecx
+ POP_ARG eax
+ jmp .Laput_obj_store
+
+.Laput_obj_check_assignability_gc_marking:
+ // Prepare arguments in line with `.Laput_obj_check_assignability_call` and jump there.
+ // (EAX, ECX and EDX were already saved in the right stack slots.)
+ INCREASE_FRAME 8 // Alignment padding.
+ PUSH_ARG eax // Pass arg2 - type of the value to be stored.
+ // The arg1 shall be pushed at `.Laput_obj_check_assignability_call`.
+ jmp .Laput_obj_check_assignability_call
+#endif // USE_READ_BARRIER
END_FUNCTION art_quick_aput_obj
DEFINE_FUNCTION art_quick_memcpy
@@ -1501,21 +1528,21 @@
// Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
// defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
@@ -1616,7 +1643,7 @@
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- test %eax, %eax // if code pointer is null goto deliver pending exception
+ test %eax, %eax // if code pointer is null goto deliver the OOME.
jz 1f
RESTORE_SAVE_REFS_AND_ARGS_FRAME_AND_JUMP
1:
@@ -1974,34 +2001,29 @@
SETUP_SAVE_REFS_ONLY_FRAME ebx // save ref containing registers for GC
// Outgoing argument set up
leal FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__(%esp), %edi // prepare args
- push %eax // push padding
+ push %eax // push padding
CFI_ADJUST_CFA_OFFSET(4)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- push %edi // pass args
+ push %edi // pass args
CFI_ADJUST_CFA_OFFSET(4)
- push %eax // pass format
+ push %eax // pass format
CFI_ADJUST_CFA_OFFSET(4)
- call SYMBOL(artStringBuilderAppend) // (uint32_t, const unit32_t*, Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ call SYMBOL(artStringBuilderAppend) // (uint32_t, const uint32_t*, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_string_builder_append
// Create a function `name` calling the ReadBarrier::Mark routine,
// getting its argument and returning its result through register
// `reg`, saving and restoring all caller-save registers.
//
-// If `reg` is different from `eax`, the generated function follows a
-// non-standard runtime calling convention:
-// - register `reg` is used to pass the (sole) argument of this function
-// (instead of EAX);
-// - register `reg` is used to return the result of this function
-// (instead of EAX);
-// - EAX is treated like a normal (non-argument) caller-save register;
-// - everything else is the same as in the standard runtime calling
-// convention (e.g. standard callee-save registers are preserved).
+// The generated function follows a non-standard runtime calling convention:
+// - register `reg` (which may differ from EAX) is used to pass the (sole) argument,
+// - register `reg` (which may differ from EAX) is used to return the result,
+// - all other registers are callee-save (the values they hold are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
// Null check so that we can load the lock word.
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 51befbe..e389c78 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -25,5 +25,7 @@
#define FRAME_SIZE_SAVE_EVERYTHING (144 + 16*8)
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_RAX_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
diff --git a/runtime/arch/x86_64/jni_entrypoints_x86_64.S b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
index 0d5fa3f..d2f1fe1 100644
--- a/runtime/arch/x86_64/jni_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
@@ -118,7 +118,7 @@
// Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
// for @FastNative or @CriticalNative.
movq THREAD_TOP_QUICK_FRAME_OFFSET(%rdi), %rax // uintptr_t tagged_quick_frame
- andq LITERAL(0xfffffffffffffffe), %rax // ArtMethod** sp
+ andq LITERAL(TAGGED_JNI_SP_MASK_TOGGLED64), %rax // ArtMethod** sp
movq (%rax), %rax // ArtMethod* method
testl LITERAL(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE), \
ART_METHOD_ACCESS_FLAGS_OFFSET(%rax)
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 673696c..3c3501f 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -731,12 +731,9 @@
movl %eax, %edi // pass the index of the constant as arg0
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
- testl %eax, %eax // If result is null, deliver the OOME.
+ testl %eax, %eax // If result is null, deliver pending exception.
jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX // restore frame up to return address
- ret
- CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_RAX /*is_ref=*/1
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION VAR(c_name)
@@ -746,18 +743,65 @@
ONE_ARG_SAVE_EVERYTHING_DOWNCALL \c_name, \cxx_name, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
END_MACRO
-MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER)
testq %rax, %rax // rax == 0 ?
jz 1f // if rax == 0 goto 1
- ret // return
+ DEOPT_OR_RETURN /*is_ref=*/1 // Check if deopt is required
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
+
+MACRO0(RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION)
+ movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
+ testq %rcx, %rcx // rcx == 0 ?
+ jnz 1f // if rcx != 0 goto 1
+ DEOPT_OR_RETURN // Check if deopt is required
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO1(DEOPT_OR_RETURN, is_ref = 0)
+ cmpl LITERAL(0), %gs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ ret
+2:
+ SETUP_SAVE_EVERYTHING_FRAME
+ movq LITERAL(\is_ref), %rdx // pass if result is a reference
+ movq %rax, %rsi // pass the result
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current
+ call SYMBOL(artDeoptimizeIfNeeded)
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+MACRO1(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_RAX, is_ref = 0)
+ cmpl LITERAL(0), %gs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ movq %rax, SAVE_EVERYTHING_FRAME_RAX_OFFSET(%rsp) // update result in the frame
+ movq LITERAL(\is_ref), %rdx // pass if result is a reference
+ movq %rax, %rsi // pass the result
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current
+ call SYMBOL(artDeoptimizeIfNeeded)
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+
+
MACRO0(RETURN_IF_EAX_ZERO)
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
- ret // return
+ DEOPT_OR_RETURN // Check if we need a deopt
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -859,7 +903,7 @@
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
@@ -931,7 +975,7 @@
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_MACRO
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be
@@ -1019,7 +1063,7 @@
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_entrypoint)
END_MACRO
@@ -1163,134 +1207,89 @@
.endif
END_MACRO
- /*
- * Macro to insert read barrier, used in art_quick_aput_obj.
- * obj_reg and dest_reg{32|64} are registers, offset is a defined literal such as
- * MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between
- * 64b PUSH/POP and 32b argument.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- *
- * As with art_quick_aput_obj function, the 64b versions are in comments.
- */
-MACRO4(READ_BARRIER, obj_reg, offset, dest_reg32, dest_reg64)
-#ifdef USE_READ_BARRIER
- PUSH rax // save registers that might be used
- PUSH rdi
- PUSH rsi
- PUSH rdx
- PUSH rcx
- SETUP_FP_CALLEE_SAVE_FRAME
- // Outgoing argument set up
- // movl REG_VAR(ref_reg32), %edi // pass ref, no-op for now since parameter ref is unused
- // // movq REG_VAR(ref_reg64), %rdi
- movl REG_VAR(obj_reg), %esi // pass obj_reg
- // movq REG_VAR(obj_reg), %rsi
- movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary
- // movq MACRO_LITERAL((RAW_VAR(offset))), %rdx
- call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
- // No need to unpoison return value in rax, artReadBarrierSlow() would do the unpoisoning.
- .ifnc RAW_VAR(dest_reg32), eax
- // .ifnc RAW_VAR(dest_reg64), rax
- movl %eax, REG_VAR(dest_reg32) // save loaded ref in dest_reg
- // movq %rax, REG_VAR(dest_reg64)
- .endif
- RESTORE_FP_CALLEE_SAVE_FRAME
- POP_REG_NE rcx, RAW_VAR(dest_reg64) // Restore registers except dest_reg
- POP_REG_NE rdx, RAW_VAR(dest_reg64)
- POP_REG_NE rsi, RAW_VAR(dest_reg64)
- POP_REG_NE rdi, RAW_VAR(dest_reg64)
- POP_REG_NE rax, RAW_VAR(dest_reg64)
-#else
- movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg32)
- // movq RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg64)
- UNPOISON_HEAP_REF RAW_VAR(dest_reg32) // UNPOISON_HEAP_REF only takes a 32b register
-#endif // USE_READ_BARRIER
-END_MACRO
-
DEFINE_FUNCTION art_quick_aput_obj
- testl %edx, %edx // store of null
-// test %rdx, %rdx
- jz .Ldo_aput_null
- READ_BARRIER edi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
- // READ_BARRIER rdi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
- READ_BARRIER ecx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
- // READ_BARRIER rcx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
-#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
- READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax // rax is free.
- // READ_BARRIER rdx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax
- cmpl %eax, %ecx // value's type == array's component type - trivial assignability
-#else
- cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
-// cmpq MIRROR_CLASS_OFFSET(%rdx), %rcx
-#endif
- jne .Lcheck_assignability
-.Ldo_aput:
+ test %edx, %edx // store of null
+ jz .Laput_obj_null
+ movl MIRROR_OBJECT_CLASS_OFFSET(%rdi), %ecx
+ UNPOISON_HEAP_REF ecx
+#ifdef USE_READ_BARRIER
+ cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
+ jnz .Laput_obj_gc_marking
+#endif // USE_READ_BARRIER
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %ecx
+ cmpl MIRROR_OBJECT_CLASS_OFFSET(%rdx), %ecx // Both poisoned if heap poisoning is enabled.
+ jne .Laput_obj_check_assignability
+.Laput_obj_store:
POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %edi
-// shrl LITERAL(CARD_TABLE_CARD_SHIFT), %rdi
- movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
+ movb %dl, (%rdx, %rdi)
ret
-.Ldo_aput_null:
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+
+.Laput_obj_null:
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
ret
-.Lcheck_assignability:
- // Save arguments.
- PUSH rdi
- PUSH rsi
- PUSH rdx
+
+.Laput_obj_check_assignability:
+ UNPOISON_HEAP_REF ecx // Unpoison array component type if poisoning is enabled.
+ PUSH_ARG rdi // Save arguments.
+ PUSH_ARG rsi
+ PUSH_ARG rdx
+ movl MIRROR_OBJECT_CLASS_OFFSET(%rdx), %esi // Pass arg2 = value's class.
+ UNPOISON_HEAP_REF esi
+.Laput_obj_check_assignability_call:
+ movl %ecx, %edi // Pass arg1 = array's component type.
SETUP_FP_CALLEE_SAVE_FRAME
-
-#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
- // The load of MIRROR_OBJECT_CLASS_OFFSET(%edx) is redundant, eax still holds the value.
- movl %eax, %esi // Pass arg2 = value's class.
- // movq %rax, %rsi
-#else
- // "Uncompress" = do nothing, as already zero-extended on load.
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
-#endif
- movq %rcx, %rdi // Pass arg1 = array's component type.
-
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
-
- // Exception?
- testq %rax, %rax
- jz .Lthrow_array_store_exception
-
- RESTORE_FP_CALLEE_SAVE_FRAME
- // Restore arguments.
- POP rdx
- POP rsi
- POP rdi
-
+ RESTORE_FP_CALLEE_SAVE_FRAME // Restore FP registers.
+ POP_ARG rdx // Restore arguments.
+ POP_ARG rsi
+ POP_ARG rdi
+ testq %rax, %rax // Check for exception.
+ jz .Laput_obj_throw_array_store_exception
POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %edi
-// shrl LITERAL(CARD_TABLE_CARD_SHIFT), %rdi
- movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
-// movb %dl, (%rdx, %rdi)
+ movb %dl, (%rdx, %rdi)
ret
- CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
-.Lthrow_array_store_exception:
- RESTORE_FP_CALLEE_SAVE_FRAME
- // Restore arguments.
- POP rdx
- POP rsi
- POP rdi
+.Laput_obj_throw_array_store_exception:
+#ifdef USE_READ_BARRIER
+ CFI_REMEMBER_STATE
+#endif // USE_READ_BARRIER
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // Save all registers as basis for long jump context.
-
// Outgoing argument set up.
movq %rdx, %rsi // Pass arg 2 = value.
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
// Pass arg 1 = array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
UNREACHABLE
+
+#ifdef USE_READ_BARRIER
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, 8
+.Laput_obj_gc_marking:
+ // We need to align stack for `art_quick_read_barrier_mark_regNN`.
+ INCREASE_FRAME 8 // Stack alignment.
+ call SYMBOL(art_quick_read_barrier_mark_reg01) // Mark ECX
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %ecx
+ UNPOISON_HEAP_REF ecx
+ call SYMBOL(art_quick_read_barrier_mark_reg01) // Mark ECX
+ movl MIRROR_OBJECT_CLASS_OFFSET(%rdx), %eax
+ UNPOISON_HEAP_REF eax
+ call SYMBOL(art_quick_read_barrier_mark_reg00) // Mark EAX
+ DECREASE_FRAME 8 // Remove stack alignment.
+ cmpl %eax, %ecx
+ je .Laput_obj_store
+ // Prepare arguments in line with `.Laput_obj_check_assignability_call` and jump there.
+ PUSH_ARG rdi // Save arguments.
+ PUSH_ARG rsi
+ PUSH_ARG rdx
+ movl %eax, %esi // Pass arg2 - type of the value to be stored.
+ // The arg1 shall be moved at `.Laput_obj_check_assignability_call`.
+ jmp .Laput_obj_check_assignability_call
+#endif // USE_READ_BARRIER
END_FUNCTION art_quick_aput_obj
// TODO: This is quite silly on X86_64 now.
@@ -1324,27 +1323,27 @@
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_EAX_ZERO
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI
@@ -1846,7 +1845,7 @@
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call artStringBuilderAppend // (uint32_t, const unit32_t*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deopt or deliver exception
END_FUNCTION art_quick_string_builder_append
// Create a function `name` calling the ReadBarrier::Mark routine,
@@ -1855,16 +1854,9 @@
//
// The generated function follows a non-standard runtime calling
// convention:
-// - register `reg` (which may be different from RDI) is used to pass
-// the (sole) argument of this function;
-// - register `reg` (which may be different from RAX) is used to return
-// the result of this function (instead of RAX);
-// - if `reg` is different from `rdi`, RDI is treated like a normal
-// (non-argument) caller-save register;
-// - if `reg` is different from `rax`, RAX is treated like a normal
-// (non-result) caller-save register;
-// - everything else is the same as in the standard runtime calling
-// convention (e.g. standard callee-save registers are preserved).
+// - register `reg` (which may be different from RDI) is used to pass the (sole) argument,
+// - register `reg` (which may be different from RAX) is used to return the result,
+// - all other registers are callee-save (the values they hold are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
// Null check so that we can load the lock word.
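
The rewritten art_quick_aput_obj (x86 earlier, x86-64 here) follows one decision tree: a null store skips all checks, an exact match between the array's component type and the value's class stores directly, anything else goes through artIsAssignableFromCode, and when the GC is marking the classes are first passed through art_quick_read_barrier_mark_regNN. The C++-style sketch below is only meant to make that control flow easier to follow; the types and helper declarations are illustrative stand-ins, not ART's real mirror API.

#include <cstdint>

// Control-flow sketch of art_quick_aput_obj; every type and helper here is a stand-in.
struct Class;
struct Object { Class* klass; };
struct ObjectArray { Class* klass; Object* data[1]; };

Class* ComponentType(Class* array_class);          // MIRROR_CLASS_COMPONENT_TYPE_OFFSET load
Class* ReadBarrierMark(Class* klass);              // art_quick_read_barrier_mark_regNN
bool GcIsMarking();                                // THREAD_IS_GC_MARKING_OFFSET test
bool IsAssignable(Class* component, Class* src);   // artIsAssignableFromCode
[[noreturn]] void ThrowArrayStoreException(ObjectArray* a, Object* v);  // artThrowArrayStoreException
void MarkCard(ObjectArray* a);                     // card table write via THREAD_CARD_TABLE_OFFSET

void AputObj(ObjectArray* array, int32_t index, Object* value) {
  if (value == nullptr) {                          // null stores need no check and no card mark
    array->data[index] = nullptr;
    return;
  }
  Class* component;
  Class* value_class;
  if (!GcIsMarking()) {                            // fast path: no read barrier required
    component = ComponentType(array->klass);
    value_class = value->klass;
  } else {                                         // GC is marking: mark the classes before comparing
    component = ReadBarrierMark(ComponentType(ReadBarrierMark(array->klass)));
    value_class = ReadBarrierMark(value->klass);
  }
  if (component != value_class && !IsAssignable(component, value_class)) {
    ThrowArrayStoreException(array, value);        // slow path: incompatible store
  }
  array->data[index] = value;
  MarkCard(array);                                 // dirty the card so the GC rescans the array
}
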
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f6f8b5f..40b7a7b 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -551,6 +551,12 @@
return nullptr;
}
+ // We should not reach here with a pc of 0. pc can be 0 for downcalls when walking the stack,
+ // but for native methods that case is handled by the caller by checking the quick frame tag; see
+ // StackVisitor::WalkStack for details. For non-native methods, pc can be 0 only for runtime
+ // methods or proxy invoke handlers, which are handled earlier.
+ DCHECK_NE(pc, 0u) << "PC 0 for " << PrettyMethod();
+
// Check whether the current entry point contains this pc.
if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
!class_linker->IsQuickResolutionStub(existing_entry_point) &&
@@ -592,21 +598,17 @@
OatFile::OatMethod oat_method =
FindOatMethodFor(this, class_linker->GetImagePointerSize(), &found);
if (!found) {
- if (IsNative()) {
- // We are running the GenericJNI stub. The entrypoint may point
- // to different entrypoints or to a JIT-compiled JNI stub.
- DCHECK(class_linker->IsQuickGenericJniStub(existing_entry_point) ||
- class_linker->IsQuickResolutionStub(existing_entry_point) ||
- existing_entry_point == GetQuickInstrumentationEntryPoint() ||
- (jit != nullptr && jit->GetCodeCache()->ContainsPc(existing_entry_point)))
- << " entrypoint: " << existing_entry_point
- << " size: " << OatQuickMethodHeader::FromEntryPoint(existing_entry_point)->GetCodeSize()
- << " pc: " << reinterpret_cast<const void*>(pc);
- return nullptr;
- }
- // Only for unit tests.
- // TODO(ngeoffray): Update these tests to pass the right pc?
- return OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+ CHECK(IsNative());
+ // We are running the GenericJNI stub. The entrypoint may point
+ // to different entrypoints or to a JIT-compiled JNI stub.
+ DCHECK(class_linker->IsQuickGenericJniStub(existing_entry_point) ||
+ class_linker->IsQuickResolutionStub(existing_entry_point) ||
+ existing_entry_point == GetQuickInstrumentationEntryPoint() ||
+ (jit != nullptr && jit->GetCodeCache()->ContainsPc(existing_entry_point)))
+ << " entrypoint: " << existing_entry_point
+ << " size: " << OatQuickMethodHeader::FromEntryPoint(existing_entry_point)->GetCodeSize()
+ << " pc: " << reinterpret_cast<const void*>(pc);
+ return nullptr;
}
const void* oat_entry_point = oat_method.GetQuickCode();
if (oat_entry_point == nullptr || class_linker->IsQuickGenericJniStub(oat_entry_point)) {
@@ -615,10 +617,13 @@
}
OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromEntryPoint(oat_entry_point);
- if (pc == 0) {
- // This is a downcall, it can only happen for a native method.
- DCHECK(IsNative());
- return method_header;
+ // We could have existing Oat code for native methods, but we may not use it if the runtime is
+ // Java debuggable or when profiling the boot class path. There is no easy way to check whether
+ // the pc corresponds to the QuickGenericJniStub. Since we have eliminated all the other cases,
+ // if the pc doesn't correspond to the AOT code then we must be running the QuickGenericJniStub.
+ if (IsNative() && !method_header->Contains(pc)) {
+ DCHECK_NE(pc, 0u) << "PC 0 for " << PrettyMethod();
+ return nullptr;
}
DCHECK(method_header->Contains(pc))
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index 829adff..c15e5de 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -68,12 +68,12 @@
// Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
kThreadWaitLock,
kCHALock,
- kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
kAllocSpaceLock,
kTaggingLockLevel,
+ kJitCodeCacheLock,
kTransactionLogLock,
kCustomTlsLock,
kJniFunctionTableLock,
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 010b384..6a6a388 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -598,6 +598,7 @@
struct ClassExtOffsets : public CheckOffsets<mirror::ClassExt> {
ClassExtOffsets() : CheckOffsets<mirror::ClassExt>(false, "Ldalvik/system/ClassExt;") {
+ addOffset(OFFSETOF_MEMBER(mirror::ClassExt, class_value_map_), "classValueMap");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, erroneous_state_error_), "erroneousStateError");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, instance_jfield_ids_), "instanceJfieldIDs");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, jmethod_ids_), "jmethodIDs");
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index c78b604..80eb89f 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -87,11 +87,11 @@
}
// Replace the runtime method on the stack with the target method.
- DCHECK(!self->GetManagedStack()->GetTopQuickFrameTag());
+ DCHECK(!self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrameKnownNotTagged();
DCHECK(*sp == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
*sp = target_method;
- self->SetTopOfStackTagged(sp); // Fake GenericJNI frame.
+ self->SetTopOfStackGenericJniTagged(sp); // Fake GenericJNI frame.
// Continue with the target method.
method = target_method;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 60a5875..76bee21 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -23,6 +23,7 @@
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
+#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 93422cf..5dca58a 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -21,16 +21,46 @@
namespace art {
+extern "C" void artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ DCHECK(!self->IsExceptionPending());
+
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ DCHECK(sp != nullptr && (*sp)->IsRuntimeMethod());
+
+ DeoptimizationMethodType type = instr->GetDeoptimizationMethodType(*sp);
+ JValue jvalue;
+ jvalue.SetJ(result);
+ instr->DeoptimizeIfNeeded(self, sp, type, jvalue, is_ref);
+}
+
extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
+
+ // We could have other dex instructions at the same dex pc as the suspend check, and we need to
+ // execute those instructions, so we should resume execution from the current dex pc.
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ JValue result;
+ result.SetJ(0);
+ Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
extern "C" void artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend(/*implicit=*/ true);
+
+ // We could have other dex instructions at the same dex pc as the suspend check, and we need to
+ // execute those instructions, so we should resume execution from the current dex pc.
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ JValue result;
+ result.SetJ(0);
+ Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
extern "C" void artCompileOptimized(ArtMethod* method, Thread* self)
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index b6ece4a..ea6501c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -713,41 +713,34 @@
// Pop transition.
self->PopManagedStackFragment(fragment);
- // Request a stack deoptimization if needed
- ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
- uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
+ // Check if caller needs to be deoptimized for instrumentation reasons.
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
// If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
// should be done and it knows the real return pc. NB If the upcall is null we don't need to do
// anything. This can happen during shutdown or early startup.
- if (UNLIKELY(
- caller != nullptr &&
- caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) {
- if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
- << caller->PrettyMethod();
- } else {
- VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
- << " to " << caller->PrettyMethod()
- << (force_frame_pop ? " for frame-pop" : "");
- DCHECK_IMPLIES(force_frame_pop, result.GetJ() == 0)
- << "Force frame pop should have no result.";
- if (force_frame_pop && self->GetException() != nullptr) {
- LOG(WARNING) << "Suppressing exception for instruction-retry: "
- << self->GetException()->Dump();
- }
- // Push the context of the deoptimization stack so we can restore the return value and the
- // exception before executing the deoptimized frames.
- self->PushDeoptimizationContext(
- result,
- shorty[0] == 'L' || shorty[0] == '[', /* class or array */
- force_frame_pop ? nullptr : self->GetException(),
- /* from_code= */ false,
- DeoptimizationMethodType::kDefault);
-
- // Set special exception to cause deoptimization.
- self->SetException(Thread::GetDeoptimizationException());
+ if (UNLIKELY(instr->ShouldDeoptimizeCaller(self, sp))) {
+ ArtMethod* caller = QuickArgumentVisitor::GetOuterMethod(sp);
+ uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
+ DCHECK(Runtime::Current()->IsAsyncDeoptimizeable(caller_pc));
+ DCHECK(caller != nullptr);
+ VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
+ << " to " << caller->PrettyMethod() << (force_frame_pop ? " for frame-pop" : "");
+ DCHECK(!force_frame_pop || result.GetJ() == 0) << "Force frame pop should have no result.";
+ if (force_frame_pop && self->GetException() != nullptr) {
+ LOG(WARNING) << "Suppressing exception for instruction-retry: "
+ << self->GetException()->Dump();
}
+ DCHECK(self->GetException() != Thread::GetDeoptimizationException());
+ // Push the context of the deoptimization stack so we can restore the return value and the
+ // exception before executing the deoptimized frames.
+ self->PushDeoptimizationContext(result,
+ shorty[0] == 'L' || shorty[0] == '[', /* class or array */
+ force_frame_pop ? nullptr : self->GetException(),
+ /* from_code= */ false,
+ DeoptimizationMethodType::kDefault);
+
+ // Set special exception to cause deoptimization.
+ self->SetException(Thread::GetDeoptimizationException());
}
// No need to restore the args since the method has already been run by the interpreter.
@@ -862,7 +855,6 @@
instr->MethodEnterEvent(soa.Self(), proxy_method);
if (soa.Self()->IsExceptionPending()) {
instr->MethodUnwindEvent(self,
- soa.Decode<mirror::Object>(rcvr_jobj),
proxy_method,
0);
return 0;
@@ -872,7 +864,6 @@
if (soa.Self()->IsExceptionPending()) {
if (instr->HasMethodUnwindListeners()) {
instr->MethodUnwindEvent(self,
- soa.Decode<mirror::Object>(rcvr_jobj),
proxy_method,
0);
}
@@ -1065,6 +1056,7 @@
}
}
+ DCHECK(!method->IsRuntimeMethod());
instrumentation->PushInstrumentationStackFrame(self,
is_static ? nullptr : h_object.Get(),
method,
@@ -2091,7 +2083,7 @@
}
// Fix up managed-stack things in Thread. After this we can walk the stack.
- self->SetTopOfStackTagged(managed_sp);
+ self->SetTopOfStackGenericJniTagged(managed_sp);
self->VerifyStack();
@@ -2185,7 +2177,7 @@
// anything that requires a mutator lock before that would cause problems as GC may have the
// exclusive mutator lock and may be moving objects, etc.
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
- DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
+ DCHECK(self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
@@ -2589,6 +2581,10 @@
// Pop transition record.
self->PopManagedStackFragment(fragment);
+ bool is_ref = (shorty[0] == 'L');
+ Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
+ self, DeoptimizationMethodType::kDefault, is_ref, result);
+
return result.GetJ();
}
@@ -2647,6 +2643,10 @@
// Pop transition record.
self->PopManagedStackFragment(fragment);
+ bool is_ref = (shorty[0] == 'L');
+ Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
+ self, DeoptimizationMethodType::kDefault, is_ref, result);
+
return result.GetJ();
}
@@ -2677,7 +2677,7 @@
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
DCHECK(instr->AreExitStubsInstalled());
bool is_ref;
- JValue return_value = instr->GetReturnValue(self, method, &is_ref, gpr_result, fpr_result);
+ JValue return_value = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
bool deoptimize = false;
{
StackHandleScope<1> hs(self);
@@ -2692,7 +2692,7 @@
// back to an upcall.
NthCallerVisitor visitor(self, 1, /*include_runtime_and_upcalls=*/false);
visitor.WalkStack(true);
- deoptimize = instr->ShouldDeoptimizeMethod(self, visitor);
+ deoptimize = instr->ShouldDeoptimizeCaller(self, visitor);
// If we need a deoptimization MethodExitEvent will be called by the interpreter when it
// re-executes the return instruction.
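The trampoline changes above share one pattern: when a runtime stub is about to return and the caller must be deoptimized, the return value is stashed in a deoptimization context and a reserved sentinel exception is set so the return path unwinds into the interpreter instead of resuming compiled code. A stripped-down sketch of that pattern with stand-in types, not the actual ART API:

  #include <cstdint>

  struct DeoptContext {
    uint64_t return_value;  // Raw return value to restore before interpreting the caller.
    bool is_reference;      // Whether the value is an object reference.
  };

  struct FakeThread {
    DeoptContext pending_deopt{};
    const void* exception = nullptr;
    // A process-wide sentinel standing in for Thread::GetDeoptimizationException().
    static const void* DeoptSentinel() {
      static const int sentinel = 0;
      return &sentinel;
    }
  };

  bool PushDeoptContextIfNeeded(FakeThread* self, bool caller_needs_deopt,
                                uint64_t return_value, bool is_reference) {
    if (!caller_needs_deopt) {
      return false;
    }
    self->pending_deopt = DeoptContext{return_value, is_reference};
    self->exception = FakeThread::DeoptSentinel();  // Special "exception" that triggers deopt on return.
    return true;
  }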
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 1e328a3..c4dbc8c 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -55,6 +55,9 @@
#include "thread_list.h"
namespace art {
+extern "C" NO_RETURN void artDeoptimize(Thread* self);
+extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self);
+
namespace instrumentation {
constexpr bool kVerboseInstrumentation = false;
@@ -104,69 +107,6 @@
Instrumentation* const instrumentation_;
};
-InstrumentationStackPopper::InstrumentationStackPopper(Thread* self)
- : self_(self),
- instrumentation_(Runtime::Current()->GetInstrumentation()),
- pop_until_(0u) {}
-
-InstrumentationStackPopper::~InstrumentationStackPopper() {
- std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
- self_->GetInstrumentationStack();
- for (auto i = stack->begin(); i != stack->end() && i->first <= pop_until_;) {
- i = stack->erase(i);
- }
-}
-
-bool InstrumentationStackPopper::PopFramesTo(uintptr_t stack_pointer,
- MutableHandle<mirror::Throwable>& exception) {
- std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
- self_->GetInstrumentationStack();
- DCHECK(!self_->IsExceptionPending());
- if (!instrumentation_->HasMethodUnwindListeners()) {
- pop_until_ = stack_pointer;
- return true;
- }
- if (kVerboseInstrumentation) {
- LOG(INFO) << "Popping frames for exception " << exception->Dump();
- }
- // The instrumentation events expect the exception to be set.
- self_->SetException(exception.Get());
- bool new_exception_thrown = false;
- auto i = stack->upper_bound(pop_until_);
-
- // Now pop all frames until reaching stack_pointer, or a new exception is
- // thrown. Note that `stack_pointer` doesn't need to be a return PC address
- // (in fact the exception handling code passes the start of the frame where
- // the catch handler is).
- for (; i != stack->end() && i->first <= stack_pointer; i++) {
- const InstrumentationStackFrame& frame = i->second;
- ArtMethod* method = frame.method_;
- // Notify listeners of method unwind.
- // TODO: improve the dex_pc information here.
- uint32_t dex_pc = dex::kDexNoIndex;
- if (kVerboseInstrumentation) {
- LOG(INFO) << "Popping for unwind " << method->PrettyMethod();
- }
- if (!method->IsRuntimeMethod() && !frame.interpreter_entry_) {
- instrumentation_->MethodUnwindEvent(self_, frame.this_object_, method, dex_pc);
- new_exception_thrown = self_->GetException() != exception.Get();
- if (new_exception_thrown) {
- pop_until_ = i->first;
- break;
- }
- }
- }
- if (!new_exception_thrown) {
- pop_until_ = stack_pointer;
- }
- exception.Assign(self_->GetException());
- self_->ClearException();
- if (kVerboseInstrumentation && new_exception_thrown) {
- LOG(INFO) << "Did partial pop of frames due to new exception";
- }
- return !new_exception_thrown;
-}
-
Instrumentation::Instrumentation()
: current_force_deopt_id_(0),
instrumentation_stubs_installed_(false),
@@ -188,6 +128,51 @@
alloc_entrypoints_instrumented_(false) {
}
+bool Instrumentation::ProcessMethodUnwindCallbacks(Thread* self,
+ std::queue<ArtMethod*>& methods,
+ MutableHandle<mirror::Throwable>& exception) {
+ DCHECK(!self->IsExceptionPending());
+ if (!HasMethodUnwindListeners()) {
+ return true;
+ }
+ if (kVerboseInstrumentation) {
+ LOG(INFO) << "Popping frames for exception " << exception->Dump();
+ }
+ // The instrumentation events expect the exception to be set.
+ self->SetException(exception.Get());
+ bool new_exception_thrown = false;
+
+ // Process callbacks for all methods that would be unwound until a new exception is thrown.
+ while (!methods.empty()) {
+ ArtMethod* method = methods.front();
+ methods.pop();
+ if (kVerboseInstrumentation) {
+ LOG(INFO) << "Popping for unwind " << method->PrettyMethod();
+ }
+
+ if (method->IsRuntimeMethod()) {
+ continue;
+ }
+
+ // Notify listeners of method unwind.
+ // TODO: improve the dex_pc information here.
+ uint32_t dex_pc = dex::kDexNoIndex;
+ MethodUnwindEvent(self, method, dex_pc);
+ new_exception_thrown = self->GetException() != exception.Get();
+ if (new_exception_thrown) {
+ break;
+ }
+ }
+
+ exception.Assign(self->GetException());
+ self->ClearException();
+ if (kVerboseInstrumentation && new_exception_thrown) {
+ LOG(INFO) << "Did partial pop of frames due to new exception";
+ }
+ return !new_exception_thrown;
+}
+
void Instrumentation::InstallStubsForClass(ObjPtr<mirror::Class> klass) {
if (!klass->IsResolved()) {
// We need the class to be resolved to install/uninstall stubs. Otherwise its methods
@@ -227,32 +212,9 @@
method->GetDeclaringClass()->DescriptorEquals("Ljava/lang/reflect/Proxy;");
}
-static void UpdateEntryPoints(ArtMethod* method, const void* quick_code)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kIsDebugBuild) {
- if (NeedsClinitCheckBeforeCall(method) &&
- !method->GetDeclaringClass()->IsVisiblyInitialized()) {
- CHECK(CanHandleInitializationCheck(quick_code));
- }
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && jit->GetCodeCache()->ContainsPc(quick_code)) {
- // Ensure we always have the thumb entrypoint for JIT on arm32.
- if (kRuntimeISA == InstructionSet::kArm) {
- CHECK_EQ(reinterpret_cast<uintptr_t>(quick_code) & 1, 1u);
- }
- }
- if (IsProxyInit(method)) {
- CHECK_NE(quick_code, GetQuickInstrumentationEntryPoint());
- }
- }
- // If the method is from a boot image, don't dirty it if the entrypoint
- // doesn't change.
- if (method->GetEntryPointFromQuickCompiledCode() != quick_code) {
- method->SetEntryPointFromQuickCompiledCode(quick_code);
- }
-}
-
-bool Instrumentation::CodeNeedsEntryExitStub(const void* code, ArtMethod* method)
+// Returns true if we need the entry / exit stub to call entry hooks. JITed code
+// calls the entry / exit hooks directly and doesn't need the stub.
+static bool CodeNeedsEntryExitStub(const void* code, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Proxy.init should never have entry/exit stubs.
if (IsProxyInit(method)) {
@@ -291,6 +253,36 @@
return true;
}
+static void UpdateEntryPoints(ArtMethod* method, const void* quick_code)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ if (NeedsClinitCheckBeforeCall(method) &&
+ !method->GetDeclaringClass()->IsVisiblyInitialized()) {
+ CHECK(CanHandleInitializationCheck(quick_code));
+ }
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && jit->GetCodeCache()->ContainsPc(quick_code)) {
+ // Ensure we always have the thumb entrypoint for JIT on arm32.
+ if (kRuntimeISA == InstructionSet::kArm) {
+ CHECK_EQ(reinterpret_cast<uintptr_t>(quick_code) & 1, 1u);
+ }
+ }
+ if (IsProxyInit(method)) {
+ CHECK_NE(quick_code, GetQuickInstrumentationEntryPoint());
+ }
+ const Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ if (instr->EntryExitStubsInstalled()) {
+ DCHECK(quick_code == GetQuickInstrumentationEntryPoint() ||
+ !CodeNeedsEntryExitStub(quick_code, method));
+ }
+ }
+ // If the method is from a boot image, don't dirty it if the entrypoint
+ // doesn't change.
+ if (method->GetEntryPointFromQuickCompiledCode() != quick_code) {
+ method->SetEntryPointFromQuickCompiledCode(quick_code);
+ }
+}
+
bool Instrumentation::InterpretOnly(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
return false;
@@ -300,16 +292,11 @@
Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method);
}
-static bool CanUseAotCode(ArtMethod* method, const void* quick_code)
+static bool CanUseAotCode(const void* quick_code)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (quick_code == nullptr) {
return false;
}
- if (method->IsNative()) {
- // AOT code for native methods can always be used.
- return true;
- }
-
Runtime* runtime = Runtime::Current();
// For simplicity, we never use AOT code for debuggable.
if (runtime->IsJavaDebuggable()) {
@@ -345,7 +332,7 @@
// In debuggable mode, we can only use AOT code for native methods.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const void* aot_code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
- if (CanUseAotCode(method, aot_code)) {
+ if (CanUseAotCode(aot_code)) {
return aot_code;
}
@@ -404,7 +391,7 @@
}
// Use the provided AOT code if possible.
- if (CanUseAotCode(method, aot_code)) {
+ if (CanUseAotCode(aot_code)) {
UpdateEntryPoints(method, aot_code);
return;
}
@@ -482,15 +469,16 @@
instrumentation_exit_pc_(instrumentation_exit_pc),
reached_existing_instrumentation_frames_(false),
force_deopt_id_(force_deopt_id),
- deopt_all_frames_(deopt_all_frames) {}
+ deopt_all_frames_(deopt_all_frames),
+ runtime_methods_need_deopt_check_(false) {}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- if (m == nullptr) {
+ if (m == nullptr || m->IsRuntimeMethod()) {
if (kVerboseInstrumentation) {
- LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
+ LOG(INFO) << " Skipping upcall / runtime method. Frame " << GetFrameId();
}
- return true; // Ignore upcalls.
+ return true; // Ignore upcalls and runtime methods.
}
if (GetCurrentQuickFrame() == nullptr) {
if (kVerboseInstrumentation) {
@@ -507,11 +495,6 @@
auto it = instrumentation_stack_->find(GetReturnPcAddr());
CHECK(it != instrumentation_stack_->end());
const InstrumentationStackFrame& frame = it->second;
- if (m->IsRuntimeMethod()) {
- if (frame.interpreter_entry_) {
- return true;
- }
- }
// We've reached a frame which has already been installed with instrumentation exit stub.
// We should have already installed instrumentation or be interpreter on previous frames.
@@ -534,12 +517,14 @@
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
if (method_header != nullptr && method_header->HasShouldDeoptimizeFlag()) {
if (deopt_all_frames_) {
+ runtime_methods_need_deopt_check_ = true;
SetShouldDeoptimizeFlag(DeoptimizeFlagValue::kDebug);
}
return true;
}
CHECK_NE(return_pc, 0U);
- if (UNLIKELY(reached_existing_instrumentation_frames_ && !m->IsRuntimeMethod())) {
+ DCHECK(!m->IsRuntimeMethod());
+ if (UNLIKELY(reached_existing_instrumentation_frames_)) {
// We already saw an existing instrumentation frame so this should be a runtime-method
// inserted by the interpreter or runtime.
std::string thread_name;
@@ -550,21 +535,9 @@
<< " return_pc is " << std::hex << return_pc;
UNREACHABLE();
}
- if (m->IsRuntimeMethod()) {
- size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
- ArtMethod** caller_frame = reinterpret_cast<ArtMethod**>(
- reinterpret_cast<uint8_t*>(GetCurrentQuickFrame()) + frame_size);
- if (*caller_frame != nullptr && (*caller_frame)->IsNative()) {
- // Do not install instrumentation exit on return to JNI stubs.
- return true;
- }
- }
+
InstrumentationStackFrame instrumentation_frame(
- m->IsRuntimeMethod() ? nullptr : GetThisObject().Ptr(),
- m,
- return_pc,
- false,
- force_deopt_id_);
+ GetThisObject().Ptr(), m, return_pc, false, force_deopt_id_);
if (kVerboseInstrumentation) {
LOG(INFO) << "Pushing frame " << instrumentation_frame.Dump();
}
@@ -584,6 +557,7 @@
bool reached_existing_instrumentation_frames_;
uint64_t force_deopt_id_;
bool deopt_all_frames_;
+ bool runtime_methods_need_deopt_check_;
};
if (kVerboseInstrumentation) {
std::string thread_name;
@@ -601,6 +575,10 @@
deopt_all_frames);
visitor.WalkStack(true);
+ if (visitor.runtime_methods_need_deopt_check_) {
+ thread->SetDeoptCheckRequired(true);
+ }
+
if (instrumentation->ShouldNotifyMethodEnterExitEvents()) {
// Create method enter events for all methods currently on the thread's stack. We only do this
// if we haven't already processed the method enter events.
@@ -622,14 +600,16 @@
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
struct RestoreStackVisitor final : public StackVisitor {
- RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
+ RestoreStackVisitor(Thread* thread_in,
+ uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
: StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
thread_(thread_in),
instrumentation_exit_pc_(instrumentation_exit_pc),
instrumentation_(instrumentation),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
- frames_removed_(0) {}
+ frames_removed_(0),
+ runtime_methods_need_deopt_check_(false) {}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
@@ -643,11 +623,19 @@
}
return true; // Ignore shadow frames.
}
- if (m == nullptr) {
+ if (m == nullptr || m->IsRuntimeMethod()) {
if (kVerboseInstrumentation) {
- LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
+ LOG(INFO) << " Skipping upcall / runtime method. Frame " << GetFrameId();
}
- return true; // Ignore upcalls.
+ return true; // Ignore upcalls and runtime methods.
+ }
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ // For JITed frames, we don't install instrumentation stubs.
+ if (method_header != nullptr && method_header->HasShouldDeoptimizeFlag()) {
+ if (IsShouldDeoptimizeFlagForDebugSet()) {
+ runtime_methods_need_deopt_check_ = true;
+ }
+ return true;
}
auto it = instrumentation_stack_->find(GetReturnPcAddr());
if (it != instrumentation_stack_->end()) {
@@ -684,6 +672,7 @@
Instrumentation* const instrumentation_;
std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* const instrumentation_stack_;
size_t frames_removed_;
+ bool runtime_methods_need_deopt_check_;
};
if (kVerboseInstrumentation) {
std::string thread_name;
@@ -698,6 +687,10 @@
reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc());
RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation);
visitor.WalkStack(true);
+ DCHECK_IMPLIES(visitor.runtime_methods_need_deopt_check_, thread->IsDeoptCheckRequired());
+ if (!visitor.runtime_methods_need_deopt_check_) {
+ thread->SetDeoptCheckRequired(false);
+ }
CHECK_EQ(visitor.frames_removed_, stack->size());
stack->clear();
}
@@ -1195,7 +1188,11 @@
UpdateEntryPoints(method, GetQuickToInterpreterBridge());
} else if (NeedsClinitCheckBeforeCall(method) &&
!method->GetDeclaringClass()->IsVisiblyInitialized()) {
- UpdateEntryPoints(method, GetQuickResolutionStub());
+ if (EntryExitStubsInstalled()) {
+ UpdateEntryPoints(method, GetQuickInstrumentationEntryPoint());
+ } else {
+ UpdateEntryPoints(method, GetQuickResolutionStub());
+ }
} else {
UpdateEntryPoints(method, GetMaybeInstrumentedCodeForInvoke(method));
}
@@ -1348,16 +1345,12 @@
}
void Instrumentation::MethodUnwindEvent(Thread* thread,
- ObjPtr<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc) const {
if (HasMethodUnwindListeners()) {
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<mirror::Object> thiz(hs.NewHandle(this_object));
for (InstrumentationListener* listener : method_unwind_listeners_) {
if (listener != nullptr) {
- listener->MethodUnwind(thread, thiz, method, dex_pc);
+ listener->MethodUnwind(thread, method, dex_pc);
}
}
}
@@ -1492,7 +1485,7 @@
if (!interpreter_entry) {
MethodEnterEvent(self, method);
if (self->IsExceptionPending()) {
- MethodUnwindEvent(self, h_this.Get(), method, 0);
+ MethodUnwindEvent(self, method, 0);
return;
}
}
@@ -1521,83 +1514,18 @@
return DeoptimizationMethodType::kDefault;
}
-// Try to get the shorty of a runtime method if it's an invocation stub.
-static char GetRuntimeMethodShorty(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
- char shorty = 'V';
- StackVisitor::WalkStack(
- [&shorty](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = stack_visitor->GetMethod();
- if (m == nullptr || m->IsRuntimeMethod()) {
- return true;
- }
- // The first Java method.
- if (m->IsNative()) {
- // Use JNI method's shorty for the jni stub.
- shorty = m->GetShorty()[0];
- } else if (m->IsProxyMethod()) {
- // Proxy method just invokes its proxied method via
- // art_quick_proxy_invoke_handler.
- shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
- } else {
- const Instruction& instr = m->DexInstructions().InstructionAt(stack_visitor->GetDexPc());
- if (instr.IsInvoke()) {
- uint16_t method_index = static_cast<uint16_t>(instr.VRegB());
- const DexFile* dex_file = m->GetDexFile();
- if (interpreter::IsStringInit(dex_file, method_index)) {
- // Invoking string init constructor is turned into invoking
- // StringFactory.newStringFromChars() which returns a string.
- shorty = 'L';
- } else {
- shorty = dex_file->GetMethodShorty(method_index)[0];
- }
-
- } else {
- // It could be that a non-invoke opcode invokes a stub, which in turn
- // invokes Java code. In such cases, we should never expect a return
- // value from the stub.
- }
- }
- // Stop stack walking since we've seen a Java frame.
- return false;
- },
- thread,
- /* context= */ nullptr,
- art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
- return shorty;
-}
-
-JValue Instrumentation::GetReturnValue(
- Thread* self, ArtMethod* method, bool* is_ref, uint64_t* gpr_result, uint64_t* fpr_result) {
+JValue Instrumentation::GetReturnValue(ArtMethod* method,
+ bool* is_ref,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result) {
uint32_t length;
const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- char return_shorty;
// Runtime method does not call into MethodExitEvent() so there should not be
// suspension point below.
ScopedAssertNoThreadSuspension ants(__FUNCTION__, method->IsRuntimeMethod());
- if (method->IsRuntimeMethod()) {
- Runtime* runtime = Runtime::Current();
- if (method != runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit) &&
- method != runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck)) {
- // If the caller is at an invocation point and the runtime method is not
- // for clinit, we need to pass return results to the caller.
- // We need the correct shorty to decide whether we need to pass the return
- // result for deoptimization below.
- return_shorty = GetRuntimeMethodShorty(self);
- } else {
- // Some runtime methods such as allocations, unresolved field getters, etc.
- // have return value. We don't need to set return_value since MethodExitEvent()
- // below isn't called for runtime methods. Deoptimization doesn't need the
- // value either since the dex instruction will be re-executed by the
- // interpreter, except these two cases:
- // (1) For an invoke, which is handled above to get the correct shorty.
- // (2) For MONITOR_ENTER/EXIT, which cannot be re-executed since it's not
- // idempotent. However there is no return value for it anyway.
- return_shorty = 'V';
- }
- } else {
- return_shorty = method->GetInterfaceMethodIfProxy(pointer_size)->GetShorty(&length)[0];
- }
+ DCHECK(!method->IsRuntimeMethod());
+ char return_shorty = method->GetInterfaceMethodIfProxy(pointer_size)->GetShorty(&length)[0];
*is_ref = return_shorty == '[' || return_shorty == 'L';
JValue return_value;
@@ -1611,27 +1539,141 @@
return return_value;
}
-bool Instrumentation::ShouldDeoptimizeMethod(Thread* self, const NthCallerVisitor& visitor) {
- bool should_deoptimize_frame = false;
- const OatQuickMethodHeader* header = visitor.GetCurrentOatQuickMethodHeader();
- if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
- uint8_t should_deopt_flag = visitor.GetShouldDeoptimizeFlag();
- // DeoptimizeFlag could be set for debugging or for CHA invalidations.
- // Deoptimize here only if it was requested for debugging. CHA
- // invalidations are handled in the JITed code.
- if ((should_deopt_flag & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
- should_deoptimize_frame = true;
- }
+bool Instrumentation::PushDeoptContextIfNeeded(Thread* self,
+ DeoptimizationMethodType deopt_type,
+ bool is_ref,
+ const JValue& return_value)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (self->IsExceptionPending()) {
+ return false;
}
- return (visitor.caller != nullptr) &&
- (InterpreterStubsInstalled() || IsDeoptimized(visitor.caller) ||
+
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ DCHECK(sp != nullptr && (*sp)->IsRuntimeMethod());
+ if (!ShouldDeoptimizeCaller(self, sp)) {
+ return false;
+ }
+
+ // TODO(mythria): The current deopt behaviour is to just re-execute the
+ // alloc instruction, so we don't need the return value. For instrumentation
+ // related deopts we don't actually need to re-execute and could reuse the
+ // result we got here. Since this is a debug-only feature it is not very
+ // important, but consider reusing the result in the future.
+ self->PushDeoptimizationContext(
+ return_value, is_ref, nullptr, /* from_code= */ false, deopt_type);
+ self->SetException(Thread::GetDeoptimizationException());
+ return true;
+}
+
+void Instrumentation::DeoptimizeIfNeeded(Thread* self,
+ ArtMethod** sp,
+ DeoptimizationMethodType type,
+ JValue return_value,
+ bool is_reference) {
+ if (self->IsAsyncExceptionPending() || ShouldDeoptimizeCaller(self, sp)) {
+ self->PushDeoptimizationContext(return_value,
+ is_reference,
+ nullptr,
+ /* from_code= */ false,
+ type);
+ artDeoptimize(self);
+ }
+}
+
+bool Instrumentation::NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method) {
+ return (method != nullptr) &&
+ (InterpreterStubsInstalled() ||
+ IsDeoptimized(method) ||
self->IsForceInterpreter() ||
// NB Since structurally obsolete compiled methods might have the offsets of
// methods/fields compiled in we need to go back to interpreter whenever we hit
// them.
- visitor.caller->GetDeclaringClass()->IsObsoleteObject() ||
- Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller) ||
- should_deoptimize_frame);
+ method->GetDeclaringClass()->IsObsoleteObject() ||
+ Dbg::IsForcedInterpreterNeededForUpcall(self, method));
+}
+
+bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) {
+ // When exit stubs aren't installed we don't need to check for any instrumentation related
+ // deoptimizations.
+ // TODO(mythria): Once we remove instrumentation stubs, rename AreExitStubsInstalled. It is
+ // used to check if any instrumentation related work needs to be done, for example calling
+ // method entry / exit hooks or checking for instrumentation related deopts at suspend points.
+ if (!AreExitStubsInstalled()) {
+ return false;
+ }
+
+ ArtMethod* runtime_method = *sp;
+ DCHECK(runtime_method->IsRuntimeMethod());
+ QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(runtime_method);
+
+ uintptr_t caller_sp = reinterpret_cast<uintptr_t>(sp) + frame_info.FrameSizeInBytes();
+ ArtMethod* caller = *(reinterpret_cast<ArtMethod**>(caller_sp));
+ uintptr_t caller_pc_addr = reinterpret_cast<uintptr_t>(sp) + frame_info.GetReturnPcOffset();
+ uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(caller_pc_addr);
+
+ if (caller == nullptr ||
+ caller->IsNative() ||
+ caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ // In general we need to check for a deoptimization here because after a redefinition it is
+ // not safe to use compiled code, since field offsets might change. The cases below are the
+ // exceptions:
+ // - If caller_pc is QuickInstrumentationExit, deoptimization will be handled by the
+ //   instrumentation exit trampoline, so we don't need to handle it here.
+ // - Native methods don't embed any field offsets, so there is no need to check for a
+ //   deoptimization for them.
+ // - If the caller is null we don't need to do anything. This can happen when the caller
+ //   is being interpreted by the switch interpreter (when called from
+ //   artQuickToInterpreterBridge), or during shutdown / early startup.
+ return false;
+ }
+
+ if (NeedsSlowInterpreterForMethod(self, caller)) {
+ if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << caller->PrettyMethod();
+ return false;
+ }
+ return true;
+ }
+
+ // Non-java-debuggable apps don't support redefinition, so there is no need to check if the
+ // frame needs to be deoptimized.
+ if (!Runtime::Current()->IsJavaDebuggable()) {
+ return false;
+ }
+
+ bool should_deoptimize_frame = false;
+ const OatQuickMethodHeader* header = caller->GetOatQuickMethodHeader(caller_pc);
+ if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
+ DCHECK(header->IsOptimized());
+ uint8_t* should_deopt_flag_addr =
+ reinterpret_cast<uint8_t*>(caller_sp) + header->GetShouldDeoptimizeFlagOffset();
+ if ((*should_deopt_flag_addr & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
+ should_deoptimize_frame = true;
+ }
+ }
+
+ if (should_deoptimize_frame && !Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << caller->PrettyMethod();
+ return false;
+ }
+ return should_deoptimize_frame;
+}
+
+bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, const NthCallerVisitor& visitor) {
+ bool should_deoptimize_frame = false;
+ if (visitor.caller != nullptr) {
+ const OatQuickMethodHeader* header = visitor.GetCurrentOatQuickMethodHeader();
+ if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
+ uint8_t should_deopt_flag = visitor.GetShouldDeoptimizeFlag();
+ // DeoptimizeFlag could be set for debugging or for CHA invalidations.
+ // Deoptimize here only if it was requested for debugging. CHA
+ // invalidations are handled in the JITed code.
+ if ((should_deopt_flag & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
+ should_deoptimize_frame = true;
+ }
+ }
+ }
+ return NeedsSlowInterpreterForMethod(self, visitor.caller) || should_deoptimize_frame;
}
TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
@@ -1656,19 +1698,19 @@
self->VerifyStack();
ArtMethod* method = instrumentation_frame.method_;
+ DCHECK(!method->IsRuntimeMethod());
bool is_ref;
- JValue return_value = GetReturnValue(self, method, &is_ref, gpr_result, fpr_result);
+ JValue return_value = GetReturnValue(method, &is_ref, gpr_result, fpr_result);
StackHandleScope<1> hs(self);
MutableHandle<mirror::Object> res(hs.NewHandle<mirror::Object>(nullptr));
if (is_ref) {
// Take a handle to the return value so we won't lose it if we suspend.
- // FIXME: The `is_ref` is often guessed wrong, so even object aligment
- // assertion would fail for some tests. See b/204766614 .
- // DCHECK_ALIGNED(return_value.GetL(), kObjectAlignment);
+ DCHECK_ALIGNED(return_value.GetL(), kObjectAlignment);
res.Assign(return_value.GetL());
}
- if (!method->IsRuntimeMethod() && !instrumentation_frame.interpreter_entry_) {
+ if (!instrumentation_frame.interpreter_entry_) {
+ DCHECK(!method->IsRuntimeMethod());
// Note that sending the event may change the contents of *return_pc_addr.
MethodExitEvent(self, instrumentation_frame.method_, OptionalFrame{}, return_value);
}
@@ -1680,7 +1722,7 @@
// Check if we forced all threads to deoptimize in the time between this frame being created and
// now.
bool should_deoptimize_frame = instrumentation_frame.force_deopt_id_ != current_force_deopt_id_;
- bool deoptimize = ShouldDeoptimizeMethod(self, visitor) || should_deoptimize_frame;
+ bool deoptimize = ShouldDeoptimizeCaller(self, visitor) || should_deoptimize_frame;
if (is_ref) {
// Restore the return value if it's a reference since it might have moved.
@@ -1718,19 +1760,17 @@
}
}
-uintptr_t Instrumentation::PopFramesForDeoptimization(Thread* self, uintptr_t pop_until) const {
+uintptr_t Instrumentation::PopInstrumentationStackUntil(Thread* self, uintptr_t pop_until) const {
std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
self->GetInstrumentationStack();
// Pop all instrumentation frames below `pop_until`.
uintptr_t return_pc = 0u;
for (auto i = stack->begin(); i != stack->end() && i->first <= pop_until;) {
- auto e = i;
- ++i;
if (kVerboseInstrumentation) {
- LOG(INFO) << "Popping for deoptimization " << e->second.method_->PrettyMethod();
+ LOG(INFO) << "Popping for deoptimization " << i->second.method_->PrettyMethod();
}
- return_pc = e->second.return_pc_;
- stack->erase(e);
+ return_pc = i->second.return_pc_;
+ i = stack->erase(i);
}
return return_pc;
}
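ProcessMethodUnwindCallbacks above replaces the InstrumentationStackPopper: the exception-delivery code now collects the methods it is about to unwind into a queue, and this helper notifies listeners per method, stopping as soon as a listener raises a new exception. A simplified sketch of that loop with stand-in types:

  #include <functional>
  #include <queue>
  #include <string>

  struct Method {
    std::string name;
    bool is_runtime_method;
  };

  // Returns true if every callback ran without a listener throwing a new exception.
  // `notify_unwind` stands in for MethodUnwindEvent and returns false if a new
  // exception was raised by a listener.
  bool ProcessUnwindCallbacks(std::queue<Method>& methods,
                              const std::function<bool(const Method&)>& notify_unwind) {
    while (!methods.empty()) {
      Method m = methods.front();
      methods.pop();
      if (m.is_runtime_method) {
        continue;  // Runtime methods never get unwind events.
      }
      if (!notify_unwind(m)) {
        return false;  // A listener threw a new exception; stop the partial pop here.
      }
    }
    return true;
  }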
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index b163109..48401f2 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -23,6 +23,7 @@
#include <list>
#include <memory>
#include <optional>
+#include <queue>
#include <unordered_set>
#include "arch/instruction_set.h"
@@ -31,6 +32,7 @@
#include "base/macros.h"
#include "base/safe_map.h"
#include "gc_root.h"
+#include "jvalue.h"
#include "offsets.h"
namespace art {
@@ -92,7 +94,6 @@
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
virtual void MethodUnwind(Thread* thread,
- Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
@@ -413,7 +414,6 @@
// Inform listeners that a method has been exited due to an exception.
void MethodUnwindEvent(Thread* thread,
- ObjPtr<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -479,12 +479,29 @@
void ExceptionHandledEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
REQUIRES_SHARED(Locks::mutator_lock_);
- JValue GetReturnValue(Thread* self,
- ArtMethod* method,
- bool* is_ref,
- uint64_t* gpr_result,
- uint64_t* fpr_result) REQUIRES_SHARED(Locks::mutator_lock_);
- bool ShouldDeoptimizeMethod(Thread* self, const NthCallerVisitor& visitor)
+ JValue GetReturnValue(ArtMethod* method, bool* is_ref, uint64_t* gpr_result, uint64_t* fpr_result)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool PushDeoptContextIfNeeded(Thread* self,
+ DeoptimizationMethodType deopt_type,
+ bool is_ref,
+ const JValue& result) REQUIRES_SHARED(Locks::mutator_lock_);
+ void DeoptimizeIfNeeded(Thread* self,
+ ArtMethod** sp,
+ DeoptimizationMethodType type,
+ JValue result,
+ bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ // TODO(mythria): Replace uses of the ShouldDeoptimizeCaller overload that takes a visitor with
+ // the one that doesn't need to walk the stack. This is used on method exits to check if the
+ // caller needs a deoptimization.
+ bool ShouldDeoptimizeCaller(Thread* self, const NthCallerVisitor& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns whether the caller of the runtime method requires a deoptimization. This checks both
+ // whether the method requires a deopt and whether this particular frame needs a deopt because
+ // of a class redefinition.
+ bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns whether the specified method requires a deoptimization. This doesn't account for
+ // whether a particular stack frame involving this method requires a deoptimization.
+ bool NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
@@ -512,9 +529,9 @@
uint64_t* fpr_result)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
- // Pops nframes instrumentation frames from the current thread. Returns the return pc for the last
- // instrumentation frame that's popped.
- uintptr_t PopFramesForDeoptimization(Thread* self, uintptr_t stack_pointer) const
+ // Pops instrumentation frames from the current thread up to the specified stack_pointer.
+ // Returns the return pc of the last instrumentation frame popped.
+ uintptr_t PopInstrumentationStackUntil(Thread* self, uintptr_t stack_pointer) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Call back for configure stubs.
@@ -555,6 +572,11 @@
return alloc_entrypoints_instrumented_;
}
+ bool ProcessMethodUnwindCallbacks(Thread* self,
+ std::queue<ArtMethod*>& methods,
+ MutableHandle<mirror::Throwable>& exception)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
InstrumentationLevel GetCurrentInstrumentationLevel() const;
private:
@@ -562,11 +584,6 @@
// False otherwise.
bool RequiresInstrumentationInstallation(InstrumentationLevel new_level) const;
- // Returns true if we need entry exit stub to call entry hooks. JITed code
- // directly call entry / exit hooks and don't need the stub.
- static bool CodeNeedsEntryExitStub(const void* code, ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Update the current instrumentation_level_.
void UpdateInstrumentationLevel(InstrumentationLevel level);
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index b0a81b6..d58fb4a 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -77,7 +77,6 @@
}
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
- Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
override REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -387,7 +386,7 @@
break;
}
case instrumentation::Instrumentation::kMethodUnwind:
- instr->MethodUnwindEvent(self, obj, method, dex_pc);
+ instr->MethodUnwindEvent(self, method, dex_pc);
break;
case instrumentation::Instrumentation::kDexPcMoved:
instr->DexPcMovedEvent(self, obj, method, dex_pc);
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 38c94ab..ad674a7 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -270,35 +270,9 @@
CHECK_EQ(shadow_frame.GetDexPC(), 0u);
self->AssertNoPendingException();
}
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
ArtMethod *method = shadow_frame.GetMethod();
- if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
- instrumentation->MethodEnterEvent(self, method);
- if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
- // The caller will retry this invoke or ignore the result. Just return immediately without
- // any value.
- DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- JValue ret = JValue();
- PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
- self, shadow_frame, ret, instrumentation, accessor.InsSize());
- return ret;
- }
- if (UNLIKELY(self->IsExceptionPending())) {
- instrumentation->MethodUnwindEvent(self,
- shadow_frame.GetThisObject(accessor.InsSize()),
- method,
- 0);
- JValue ret = JValue();
- if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
- DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
- self, shadow_frame, ret, instrumentation, accessor.InsSize());
- }
- return ret;
- }
- }
-
+ // If we can continue in the JIT and JITed code is available, execute the JITed code.
if (!stay_in_interpreter && !self->IsForceInterpreter()) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
@@ -320,6 +294,32 @@
}
}
}
+
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
+ instrumentation->MethodEnterEvent(self, method);
+ if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+ // The caller will retry this invoke or ignore the result. Just return immediately without
+ // any value.
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ JValue ret = JValue();
+ PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
+ self, shadow_frame, ret, instrumentation, accessor.InsSize());
+ return ret;
+ }
+ if (UNLIKELY(self->IsExceptionPending())) {
+ instrumentation->MethodUnwindEvent(self,
+ method,
+ 0);
+ JValue ret = JValue();
+ if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
+ self, shadow_frame, ret, instrumentation, accessor.InsSize());
+ }
+ return ret;
+ }
+ }
}
ArtMethod* method = shadow_frame.GetMethod();
@@ -476,6 +476,7 @@
const uint32_t dex_pc = shadow_frame->GetDexPC();
uint32_t new_dex_pc = dex_pc;
if (UNLIKELY(self->IsExceptionPending())) {
+ DCHECK(self->GetException() != Thread::GetDeoptimizationException());
// If we deoptimize from the QuickExceptionHandler, we already reported the exception throw
// event to the instrumentation. Skip throw listeners for the first frame. The deopt check
// should happen after the throw listener is called as throw listener can trigger a
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index c8a87c1..afba57e 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -185,7 +185,6 @@
// Exception is not caught by the current method. We will unwind to the
// caller. Notify any instrumentation listener.
instrumentation->MethodUnwindEvent(self,
- shadow_frame.GetThisObject(),
shadow_frame.GetMethod(),
shadow_frame.GetDexPC());
}
diff --git a/runtime/interpreter/mterp/arm64ng/object.S b/runtime/interpreter/mterp/arm64ng/object.S
index a2d4497..df044d9 100644
--- a/runtime/interpreter/mterp/arm64ng/object.S
+++ b/runtime/interpreter/mterp/arm64ng/object.S
@@ -99,7 +99,7 @@
// not going to slow path if the super class hierarchy check fails.
TEST_IF_MARKING 4f
ldr w3, [x1, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tbnz w3, #MIRROR_CLASS_IS_INTERFACE_FLAG_BIT, 4f
+ tbnz w3, #MIRROR_CLASS_IS_INTERFACE_FLAG_BIT, 5f
ldr w3, [x1, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
cbnz w3, 3f
1:
diff --git a/runtime/interpreter/mterp/armng/object.S b/runtime/interpreter/mterp/armng/object.S
index 8a393ce..7deffaf 100644
--- a/runtime/interpreter/mterp/armng/object.S
+++ b/runtime/interpreter/mterp/armng/object.S
@@ -101,7 +101,7 @@
TEST_IF_MARKING 4f
ldr r3, [r1, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
tst r3, #MIRROR_CLASS_IS_INTERFACE_FLAG
- bne 4f
+ bne 5f
ldr r3, [r1, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
cmp r3, #0
bne 3f
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 5e01aaa..facf66a 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -208,7 +208,6 @@
// Jit GC for now (b/147208992).
if (code_cache->GetGarbageCollectCode()) {
code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
- !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
!jit->JitAtFirstUse());
}
@@ -259,10 +258,14 @@
return true;
}
-bool Jit::CompileMethod(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind,
- bool prejit) {
+bool Jit::CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ CHECK(GetCodeCache()->IsMethodBeingCompiled(method, compilation_kind));
+ }
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
@@ -323,7 +326,7 @@
<< ArtMethod::PrettyMethod(method_to_compile)
<< " kind=" << compilation_kind;
bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
- code_cache_->DoneCompiling(method_to_compile, self, compilation_kind);
+ code_cache_->DoneCompiling(method_to_compile, self);
if (!success) {
VLOG(jit) << "Failed to compile method "
<< ArtMethod::PrettyMethod(method_to_compile)
@@ -743,6 +746,48 @@
child_mapping_methods.Reset();
}
+class ScopedCompilation {
+ public:
+ ScopedCompilation(ScopedCompilation&& other) :
+ jit_(other.jit_),
+ method_(other.method_),
+ compilation_kind_(other.compilation_kind_),
+ owns_compilation_(other.owns_compilation_) {
+ other.owns_compilation_ = false;
+ }
+
+ ScopedCompilation(Jit* jit, ArtMethod* method, CompilationKind compilation_kind)
+ : jit_(jit),
+ method_(method),
+ compilation_kind_(compilation_kind),
+ owns_compilation_(true) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ if (jit_->GetCodeCache()->IsMethodBeingCompiled(method_, compilation_kind_)) {
+ owns_compilation_ = false;
+ return;
+ }
+ jit_->GetCodeCache()->AddMethodBeingCompiled(method_, compilation_kind_);
+ }
+
+ bool OwnsCompilation() const {
+ return owns_compilation_;
+ }
+
+ ~ScopedCompilation() {
+ if (owns_compilation_) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ jit_->GetCodeCache()->RemoveMethodBeingCompiled(method_, compilation_kind_);
+ }
+ }
+
+ private:
+ Jit* const jit_;
+ ArtMethod* const method_;
+ const CompilationKind compilation_kind_;
+ bool owns_compilation_;
+};
+
class JitCompileTask final : public Task {
public:
enum class TaskKind {
@@ -750,25 +795,16 @@
kPreCompile,
};
- JitCompileTask(ArtMethod* method, TaskKind task_kind, CompilationKind compilation_kind)
- : method_(method), kind_(task_kind), compilation_kind_(compilation_kind), klass_(nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
- // until compilation is done.
- // When we precompile, this is either with boot classpath methods, or main
- // class loader methods, so we don't need to keep a global reference.
- if (method->GetDeclaringClass()->GetClassLoader() != nullptr &&
- kind_ != TaskKind::kPreCompile) {
- klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
- CHECK(klass_ != nullptr);
- }
- }
-
- ~JitCompileTask() {
- if (klass_ != nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
- }
+ JitCompileTask(ArtMethod* method,
+ TaskKind task_kind,
+ CompilationKind compilation_kind,
+ ScopedCompilation&& sc)
+ : method_(method),
+ kind_(task_kind),
+ compilation_kind_(compilation_kind),
+ scoped_compilation_(std::move(sc)) {
+ DCHECK(scoped_compilation_.OwnsCompilation());
+ DCHECK(!sc.OwnsCompilation());
}
void Run(Thread* self) override {
@@ -777,7 +813,7 @@
switch (kind_) {
case TaskKind::kCompile:
case TaskKind::kPreCompile: {
- Runtime::Current()->GetJit()->CompileMethod(
+ Runtime::Current()->GetJit()->CompileMethodInternal(
method_,
self,
compilation_kind_,
@@ -797,7 +833,7 @@
ArtMethod* const method_;
const TaskKind kind_;
const CompilationKind compilation_kind_;
- jobject klass_;
+ ScopedCompilation scoped_compilation_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
@@ -1285,6 +1321,21 @@
}
}
+void Jit::AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return;
+ }
+ JitCompileTask::TaskKind task_kind = precompile
+ ? JitCompileTask::TaskKind::kPreCompile
+ : JitCompileTask::TaskKind::kCompile;
+ thread_pool_->AddTask(
+ self, new JitCompileTask(method, task_kind, compilation_kind, std::move(sc)));
+}
+
bool Jit::CompileMethodFromProfile(Thread* self,
ClassLinker* class_linker,
uint32_t method_idx,
@@ -1305,6 +1356,7 @@
// Already seen by another profile.
return false;
}
+ CompilationKind compilation_kind = CompilationKind::kOptimized;
const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
class_linker->IsQuickGenericJniStub(entry_point) ||
@@ -1315,11 +1367,15 @@
VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
<< " from profile";
method->SetPreCompiled();
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return false;
+ }
if (!add_to_queue) {
- CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ true);
+ CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
} else {
Task* task = new JitCompileTask(
- method, JitCompileTask::TaskKind::kPreCompile, CompilationKind::kOptimized);
+ method, JitCompileTask::TaskKind::kPreCompile, compilation_kind, std::move(sc));
if (compile_after_boot) {
AddPostBootTask(self, task);
} else {
@@ -1470,11 +1526,7 @@
// hotness threshold. If we're not only using the baseline compiler, enqueue a compilation
// task that will compile optimize the method.
if (!options_->UseBaselineCompiler()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
}
}
@@ -1494,23 +1546,17 @@
bool was_runtime_thread_;
};
-void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
+void Jit::MethodEntered(Thread* self, ArtMethod* method) {
Runtime* runtime = Runtime::Current();
if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
if (np_method->IsCompilable()) {
- // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
- // conflicts with jitzygote optimizations.
- JitCompileTask compile_task(
- method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOptimized);
- // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
- ScopedSetRuntimeThread ssrt(thread);
- compile_task.Run(thread);
+ CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ false);
}
return;
}
- AddSamples(thread, method);
+ AddSamples(self, method);
}
void Jit::WaitForCompilationToFinish(Thread* self) {
@@ -1615,7 +1661,6 @@
// Jit GC for now (b/147208992).
code_cache_->SetGarbageCollectCode(
!jit_compiler_->GenerateDebugInfo() &&
- !runtime->GetInstrumentation()->AreExitStubsInstalled() &&
!JitAtFirstUse());
if (is_system_server && runtime->HasImageWithProfile()) {
@@ -1740,9 +1785,7 @@
if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
// If we already have compiled code for it, nterp may be stuck in a loop.
// Compile OSR.
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr));
+ AddCompileTask(self, method, CompilationKind::kOsr);
}
return;
}
@@ -1776,17 +1819,27 @@
}
if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
+ AddCompileTask(self, method, CompilationKind::kBaseline);
} else {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
}
}
+bool Jit::CompileMethod(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ // TODO: all current users of this method expect us to wait if it is being compiled.
+ if (!sc.OwnsCompilation()) {
+ return false;
+ }
+ // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
+ ScopedSetRuntimeThread ssrt(self);
+ // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
+ // conflicts with jitzygote optimizations.
+ return CompileMethodInternal(method, self, compilation_kind, prejit);
+}
+
} // namespace jit
} // namespace art
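ScopedCompilation above moves the "is this method already being compiled in this mode" bookkeeping out of NotifyCompilationOf / DoneCompiling into an RAII guard whose ownership can be transferred into a queued JitCompileTask. A generic sketch of that pattern with simplified types (a std::set of ids standing in for the per-kind compilation sets):

  #include <mutex>
  #include <set>

  class CompilationGuard {
   public:
    CompilationGuard(std::set<int>* in_flight, std::mutex* lock, int method_id)
        : in_flight_(in_flight), lock_(lock), method_id_(method_id), owns_(false) {
      std::lock_guard<std::mutex> guard(*lock_);
      // Register the compilation; if it is already registered we do not own it.
      owns_ = in_flight_->insert(method_id_).second;
    }
    CompilationGuard(CompilationGuard&& other) noexcept
        : in_flight_(other.in_flight_),
          lock_(other.lock_),
          method_id_(other.method_id_),
          owns_(other.owns_) {
      other.owns_ = false;  // The moved-from guard must not deregister.
    }
    ~CompilationGuard() {
      if (owns_) {
        std::lock_guard<std::mutex> guard(*lock_);
        in_flight_->erase(method_id_);
      }
    }
    bool OwnsCompilation() const { return owns_; }

   private:
    std::set<int>* in_flight_;
    std::mutex* lock_;
    int method_id_;
    bool owns_;
  };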
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index b439c8e..fd92451 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -53,6 +53,7 @@
namespace jit {
class JitCodeCache;
+class JitCompileTask;
class JitMemoryRegion;
class JitOptions;
@@ -461,6 +462,17 @@
static bool BindCompilerMethods(std::string* error_msg);
+ void AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile = false);
+
+ bool CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// JIT compiler
static void* jit_library_handle_;
static JitCompilerInterface* jit_compiler_;
@@ -507,6 +519,8 @@
// between the zygote and apps.
std::map<ArtMethod*, uint16_t> shared_method_counters_;
+ friend class art::jit::JitCompileTask;
+
DISALLOW_COPY_AND_ASSIGN(Jit);
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 0b34688..4148660 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1594,10 +1594,30 @@
return osr_code_map_.find(method) != osr_code_map_.end();
}
+void JitCodeCache::VisitRoots(RootVisitor* visitor) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
+ for (ArtMethod* method : current_optimized_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_baseline_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_osr_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+}
+
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
Thread* self,
CompilationKind compilation_kind,
bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ // Note: the compilation kind may have been adjusted from what was passed in initially.
+ // We really just want to check that the method is indeed being compiled.
+ CHECK(IsMethodBeingCompiled(method));
+ }
const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
OatQuickMethodHeader* method_header =
@@ -1686,13 +1706,8 @@
}
}
}
- MutexLock mu(self, *Locks::jit_lock_);
- if (IsMethodBeingCompiled(method, compilation_kind)) {
- return false;
- }
- AddMethodBeingCompiled(method, compilation_kind);
- return true;
}
+ return true;
}
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
@@ -1715,9 +1730,7 @@
it->second->DecrementInlineUse();
}
-void JitCodeCache::DoneCompiling(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind) {
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
DCHECK_EQ(Thread::Current(), self);
MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
@@ -1729,8 +1742,6 @@
// Failed to compile; the JNI compiler never fails, but the cache may be full.
jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
} // else Commit() updated entrypoints of all methods in the JniStubData.
- } else {
- RemoveMethodBeingCompiled(method, compilation_kind);
}
}
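With JitCompileTask no longer holding a JNI global reference to the declaring class, JitCodeCache::VisitRoots above instead reports every method currently being compiled to the GC, which keeps those classes alive for the duration of the compilation. A minimal sketch of the idea with stand-in types:

  #include <functional>
  #include <vector>

  struct FakeMethod {};  // Stands in for ArtMethod.

  class FakeCodeCache {
   public:
    void AddInFlight(FakeMethod* m) { in_flight_.push_back(m); }
    // Called by the GC: every in-flight method (and hence its class) is treated as reachable.
    void VisitRoots(const std::function<void(FakeMethod*)>& visit) const {
      for (FakeMethod* m : in_flight_) {
        visit(m);
      }
    }

   private:
    std::vector<FakeMethod*> in_flight_;
  };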
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fb861a4..a534ba9 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -215,7 +215,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
- void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind)
+ void DoneCompiling(ArtMethod* method, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -403,6 +403,20 @@
ProfilingInfo* GetProfilingInfo(ArtMethod* method, Thread* self);
void ResetHotnessCounter(ArtMethod* method, Thread* self);
+ void VisitRoots(RootVisitor* visitor);
+
+ // Return whether `method` is being compiled with the given mode.
+ bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+ // Remove `method` from the list of methods being compiled with the given mode.
+ void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+ // Record that `method` is being compiled with the given mode.
+ void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
private:
JitCodeCache();
@@ -492,18 +506,6 @@
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Record that `method` is being compiled with the given mode.
- void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Remove `method` from the list of methods meing compiled with the given mode.
- void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Return whether `method` is being compiled with the given mode.
- bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
// Return whether `method` is being compiled in any mode.
bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 04a27fe..0e7dfe3 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -43,6 +43,8 @@
// code.
class PACKED(4) ManagedStack {
public:
+ static constexpr size_t kTaggedJniSpMask = 0x3;
+
ManagedStack()
: tagged_top_quick_frame_(TaggedTopQuickFrame::CreateNotTagged(nullptr)),
link_(nullptr),
@@ -75,8 +77,12 @@
return tagged_top_quick_frame_.GetSp();
}
- bool GetTopQuickFrameTag() const {
- return tagged_top_quick_frame_.GetTag();
+ bool GetTopQuickFrameGenericJniTag() const {
+ return tagged_top_quick_frame_.GetGenericJniTag();
+ }
+
+ bool GetTopQuickFrameJitJniTag() const {
+ return tagged_top_quick_frame_.GetJitJniTag();
}
bool HasTopQuickFrame() const {
@@ -89,10 +95,10 @@
tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateNotTagged(top);
}
- void SetTopQuickFrameTagged(ArtMethod** top) {
+ void SetTopQuickFrameGenericJniTagged(ArtMethod** top) {
DCHECK(top_shadow_frame_ == nullptr);
DCHECK_ALIGNED(top, 4u);
- tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
+ tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateGenericJniTagged(top);
}
static constexpr size_t TaggedTopQuickFrameOffset() {
@@ -129,26 +135,30 @@
return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp));
}
- static TaggedTopQuickFrame CreateTagged(ArtMethod** sp) {
+ static TaggedTopQuickFrame CreateGenericJniTagged(ArtMethod** sp) {
DCHECK_ALIGNED(sp, 4u);
return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp) | 1u);
}
// Get SP known to be not tagged and non-null.
ArtMethod** GetSpKnownNotTagged() const {
- DCHECK(!GetTag());
+ DCHECK(!GetGenericJniTag() && !GetJitJniTag());
DCHECK_NE(tagged_sp_, 0u);
return reinterpret_cast<ArtMethod**>(tagged_sp_);
}
ArtMethod** GetSp() const {
- return reinterpret_cast<ArtMethod**>(tagged_sp_ & ~static_cast<uintptr_t>(1u));
+ return reinterpret_cast<ArtMethod**>(tagged_sp_ & ~static_cast<uintptr_t>(kTaggedJniSpMask));
}
- bool GetTag() const {
+ bool GetGenericJniTag() const {
return (tagged_sp_ & 1u) != 0u;
}
+ bool GetJitJniTag() const {
+ return (tagged_sp_ & 2u) != 0u;
+ }
+
uintptr_t GetTaggedSp() const {
return tagged_sp_;
}
diff --git a/runtime/metrics/reporter.cc b/runtime/metrics/reporter.cc
index a44066e..28ca997 100644
--- a/runtime/metrics/reporter.cc
+++ b/runtime/metrics/reporter.cc
@@ -126,10 +126,17 @@
// Configure the backends
if (config_.dump_to_logcat) {
- backends_.emplace_back(new LogBackend(LogSeverity::INFO));
+ backends_.emplace_back(new LogBackend(std::make_unique<TextFormatter>(), LogSeverity::INFO));
}
if (config_.dump_to_file.has_value()) {
- backends_.emplace_back(new FileBackend(config_.dump_to_file.value()));
+ std::unique_ptr<MetricsFormatter> formatter;
+ if (config_.metrics_format == "xml") {
+ formatter = std::make_unique<XmlFormatter>();
+ } else {
+ formatter = std::make_unique<TextFormatter>();
+ }
+
+ backends_.emplace_back(new FileBackend(std::move(formatter), config_.dump_to_file.value()));
}
if (config_.dump_to_statsd) {
auto backend = CreateStatsdBackend();
@@ -291,6 +298,7 @@
.dump_to_logcat = gFlags.MetricsWriteToLogcat(),
.dump_to_file = gFlags.MetricsWriteToFile.GetValueOptional(),
.dump_to_statsd = gFlags.MetricsWriteToStatsd(),
+ .metrics_format = gFlags.MetricsFormat(),
.period_spec = period_spec,
.reporting_num_mods = reporting_num_mods,
.reporting_mods = reporting_mods,
diff --git a/runtime/metrics/reporter.h b/runtime/metrics/reporter.h
index daeaf1f..af9e0ca 100644
--- a/runtime/metrics/reporter.h
+++ b/runtime/metrics/reporter.h
@@ -78,6 +78,9 @@
// If set, provides a file name to enable metrics logging to a file.
std::optional<std::string> dump_to_file;
+ // Provides the desired output format for metrics written to a file.
+ std::string metrics_format;
+
// The reporting period configuration.
std::optional<ReportingPeriodSpec> period_spec;
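The reporter.cc hunk above selects an XmlFormatter when the new metrics_format option is "xml" and falls back to a TextFormatter otherwise. Below is a hedged, self-contained sketch of that selection pattern with simplified stand-in interfaces, not the actual ART classes.

    #include <memory>
    #include <string>

    // Simplified stand-ins for a formatter hierarchy.
    struct Formatter { virtual ~Formatter() = default; virtual std::string Name() const = 0; };
    struct TextFormatter : Formatter { std::string Name() const override { return "text"; } };
    struct XmlFormatter : Formatter { std::string Name() const override { return "xml"; } };

    // Mirrors the selection logic: unknown or empty format strings fall back to text.
    std::unique_ptr<Formatter> MakeFormatter(const std::string& metrics_format) {
      if (metrics_format == "xml") {
        return std::make_unique<XmlFormatter>();
      }
      return std::make_unique<TextFormatter>();
    }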
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 90efce5..4c08bdd 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -553,7 +553,7 @@
// The size of java.lang.Class.class.
static uint32_t ClassClassSize(PointerSize pointer_size) {
// The number of vtable entries in java.lang.Class.
- uint32_t vtable_entries = Object::kVTableLength + 67;
+ uint32_t vtable_entries = Object::kVTableLength + 68;
return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
}
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index 4ce3b10..b805ea0 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -156,6 +156,10 @@
bool EnsureJniIdsArrayPresent(MemberOffset off, size_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Backing store of user-defined values pertaining to a class.
+ // Maintained by the ClassValue class.
+ HeapReference<Object> class_value_map_;
+
// The saved error for this class being erroneous.
HeapReference<Throwable> erroneous_state_error_;
@@ -181,9 +185,10 @@
// classes sfields_ array or '0' if no id has been assigned to that field yet.
HeapReference<PointerArray> static_jfield_ids_;
+ int32_t pre_redefine_class_def_index_;
+
// Native pointer to DexFile and ClassDef index of this class before it was JVMTI-redefined.
int64_t pre_redefine_dex_file_ptr_;
- int32_t pre_redefine_class_def_index_;
friend struct art::ClassExtOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ClassExt);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index da42e61..9a2c88b 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -32,6 +32,7 @@
#include "hidden_api.h"
#include "jni/jni_internal.h"
#include "mirror/class-alloc-inl.h"
+#include "mirror/class_ext.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field.h"
@@ -750,6 +751,17 @@
return soa.AddLocalReference<jclass>(annotations::GetDeclaringClass(klass));
}
+static jobject Class_ensureExtDataPresent(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
+
+ ObjPtr<mirror::Object> extDataPtr =
+ mirror::Class::EnsureExtDataPresent(klass, Thread::Current());
+
+ return soa.AddLocalReference<jobject>(extDataPtr);
+}
+
static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<4> hs(soa.Self());
@@ -841,6 +853,7 @@
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Class, classForName,
"(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
+ FAST_NATIVE_METHOD(Class, ensureExtDataPresent, "()Ldalvik/system/ClassExt;"),
FAST_NATIVE_METHOD(Class, getDeclaredAnnotation,
"(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
FAST_NATIVE_METHOD(Class, getDeclaredAnnotations, "()[Ljava/lang/annotation/Annotation;"),
diff --git a/runtime/oat.h b/runtime/oat.h
index 462d41c..14b389d 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
class PACKED(4) OatHeader {
public:
static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
- // Last oat version changed reason: Revert^4 "bss support for inlining BCP into non-BCP".
- static constexpr std::array<uint8_t, 4> kOatVersion { { '2', '2', '5', '\0' } };
+ // Last oat version changed reason: Update deoptimization from runtime methods.
+ static constexpr std::array<uint8_t, 4> kOatVersion { { '2', '2', '6', '\0' } };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index e347588..0bbf23f 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -28,6 +28,11 @@
class ArtMethod;
+// Size in bytes of the should_deoptimize flag on stack.
+// We just need 4 bytes for our purpose regardless of the architecture. Frame size
+// calculation will automatically do alignment for the final frame size.
+static constexpr size_t kShouldDeoptimizeFlagSize = 4;
+
// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
class PACKED(4) OatQuickMethodHeader {
public:
@@ -145,6 +150,17 @@
return CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
}
+ size_t GetShouldDeoptimizeFlagOffset() const {
+ DCHECK(IsOptimized());
+ QuickMethodFrameInfo frame_info = GetFrameInfo();
+ size_t frame_size = frame_info.FrameSizeInBytes();
+ size_t core_spill_size =
+ POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
+ size_t fpu_spill_size =
+ POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
+ return frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
+ }
+
uintptr_t ToNativeQuickPc(ArtMethod* method,
const uint32_t dex_pc,
bool is_for_catch_handler,
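GetShouldDeoptimizeFlagOffset() above locates the 4-byte should_deoptimize flag by subtracting the spill areas and the flag size from the frame size. The sketch below works through that arithmetic with hypothetical numbers; it is not taken from a real method's frame layout.

    #include <cstdint>
    #include <bit>  // std::popcount (C++20)

    constexpr size_t kShouldDeoptimizeFlagSize = 4;

    // Offset of the flag within the frame, given the frame layout.
    constexpr size_t ShouldDeoptimizeFlagOffset(size_t frame_size,
                                                uint32_t core_spill_mask,
                                                uint32_t fp_spill_mask,
                                                size_t gpr_bytes,
                                                size_t fpr_bytes) {
      size_t core_spills = std::popcount(core_spill_mask) * gpr_bytes;
      size_t fp_spills = std::popcount(fp_spill_mask) * fpr_bytes;
      return frame_size - core_spills - fp_spills - kShouldDeoptimizeFlagSize;
    }

    // Example: 96-byte frame, 5 saved GPRs of 8 bytes each, no FP spills:
    // 96 - 40 - 0 - 4 == 52.
    static_assert(ShouldDeoptimizeFlagOffset(96, 0b11111, 0, 8, 8) == 52);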
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 2a6929a..f8bfb5a 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -16,6 +16,7 @@
#include "quick_exception_handler.h"
#include <ios>
+#include <queue>
#include "arch/context.h"
#include "art_method-inl.h"
@@ -95,7 +96,19 @@
DCHECK(method->IsCalleeSaveMethod());
return true;
}
- return HandleTryItems(method);
+ bool continue_stack_walk = HandleTryItems(method);
+ // Collect methods for which the MethodUnwind callback needs to be invoked. The callback can
+ // potentially throw, so we only run the callbacks after the catch block has been found.
+ // The stack walk stops at the catch block; if we are ending the walk here, this method is not
+ // unwound, so don't record it.
+ if (continue_stack_walk) {
+ unwound_methods_.push(method);
+ }
+ return continue_stack_walk;
+ }
+
+ std::queue<ArtMethod*>& GetUnwoundMethods() {
+ return unwound_methods_;
}
private:
@@ -139,6 +152,9 @@
QuickExceptionHandler* const exception_handler_;
// The number of frames to skip searching for catches in.
uint32_t skip_frames_;
+ // The methods we unwind past to reach the catch block, recorded so we can invoke their
+ // MethodUnwind callbacks.
+ std::queue<ArtMethod*> unwound_methods_;
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
@@ -147,7 +163,7 @@
// Note that this might change the exception being thrown.
void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
DCHECK(!is_deoptimization_);
- instrumentation::InstrumentationStackPopper popper(self_);
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
// The number of total frames we have so far popped.
uint32_t already_popped = 0;
bool popped_to_top = true;
@@ -195,9 +211,13 @@
handler_method_header_->IsOptimized()) {
SetCatchEnvironmentForOptimizedHandler(&visitor);
}
- popped_to_top =
- popper.PopFramesTo(reinterpret_cast<uintptr_t>(handler_quick_frame_), exception_ref);
+ popped_to_top = instr->ProcessMethodUnwindCallbacks(self_,
+ visitor.GetUnwoundMethods(),
+ exception_ref);
} while (!popped_to_top);
+
+ // Pop off frames on instrumentation stack to keep it in sync with what is on the stack.
+ instr->PopInstrumentationStackUntil(self_, reinterpret_cast<uintptr_t>(handler_quick_frame_));
if (!clear_exception_) {
// Put exception back in root set with clear throw location.
self_->SetException(exception_ref.Get());
@@ -642,7 +662,7 @@
uintptr_t return_pc = 0;
if (method_tracing_active_) {
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- return_pc = instrumentation->PopFramesForDeoptimization(
+ return_pc = instrumentation->PopInstrumentationStackUntil(
self_, reinterpret_cast<uintptr_t>(handler_quick_frame_));
}
return return_pc;
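The CatchBlockStackVisitor change above defers MethodUnwind callbacks: methods passed over during the catch search are queued and the callbacks run only after the handler is found, because they may themselves throw. Here is a minimal standalone sketch of that two-phase pattern using generic types; it is not the ART instrumentation API.

    #include <functional>
    #include <queue>
    #include <string>
    #include <vector>

    // Phase 1: walk frames and record the ones unwound past, stopping at the handler frame.
    std::queue<std::string> CollectUnwoundFrames(
        const std::vector<std::string>& frames,
        const std::function<bool(const std::string&)>& is_handler) {
      std::queue<std::string> unwound;
      for (const std::string& frame : frames) {
        if (is_handler(frame)) {
          break;  // The handler frame itself is not unwound.
        }
        unwound.push(frame);
      }
      return unwound;
    }

    // Phase 2: run the (potentially throwing) unwind callbacks once the handler is known.
    void RunUnwindCallbacks(std::queue<std::string>& unwound,
                            const std::function<void(const std::string&)>& callback) {
      while (!unwound.empty()) {
        callback(unwound.front());
        unwound.pop();
      }
    }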
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e20f883..d2eb3bd 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2457,6 +2457,9 @@
class_linker_->VisitRoots(visitor, flags);
jni_id_manager_->VisitRoots(visitor);
heap_->VisitAllocationRecords(visitor);
+ if (jit_ != nullptr) {
+ jit_->GetCodeCache()->VisitRoots(visitor);
+ }
if ((flags & kVisitRootFlagNewRoots) == 0) {
// Guaranteed to have no new roots in the constant roots.
VisitConstantRoots(visitor);
@@ -3144,15 +3147,19 @@
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
+ // For Java-debuggable runtimes we also deoptimize native methods. For other cases (boot
+ // image profiling) we don't need to deoptimize native methods. If this changes, also
+ // update Instrumentation::CanUseAotCode.
+ bool deoptimize_native_methods = Runtime::Current()->IsJavaDebuggable();
if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
- !m.IsNative() &&
+ (!m.IsNative() || deoptimize_native_methods) &&
!m.IsProxyMethod()) {
instrumentation_->InitializeMethodsCode(&m, /*aot_code=*/ nullptr);
}
if (Runtime::Current()->GetJit() != nullptr &&
Runtime::Current()->GetJit()->GetCodeCache()->IsInZygoteExecSpace(code) &&
- !m.IsNative()) {
+ (!m.IsNative() || deoptimize_native_methods)) {
DCHECK(!m.IsProxyMethod());
instrumentation_->InitializeMethodsCode(&m, /*aot_code=*/ nullptr);
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 50a96d0..33d3668 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -800,10 +800,20 @@
// between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
// changed since the frame was entered. The top quick frame tag indicates
// GenericJNI here, otherwise it's either AOT-compiled or JNI-compiled JNI stub.
- if (UNLIKELY(current_fragment->GetTopQuickFrameTag())) {
+ if (UNLIKELY(current_fragment->GetTopQuickFrameGenericJniTag())) {
// The generic JNI does not have any method header.
cur_oat_quick_method_header_ = nullptr;
+ } else if (UNLIKELY(current_fragment->GetTopQuickFrameJitJniTag())) {
+ // Should be JITed code.
+ Runtime* runtime = Runtime::Current();
+ const void* code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
+ CHECK(code != nullptr) << method->PrettyMethod();
+ cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
} else {
+ // We are sure we are not running GenericJni here, though the entry point could still be
+ // the GenericJni stub. The entry point is usually JITed or AOT code, or the instrumentation
+ // stub when instrumentation is enabled. It could also be a resolution stub if the class
+ // isn't visibly initialized yet.
const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
CHECK(existing_entry_point != nullptr);
Runtime* runtime = Runtime::Current();
@@ -819,7 +829,11 @@
if (code != nullptr) {
cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
} else {
- // This must be a JITted JNI stub frame.
+ // This must be a JITted JNI stub frame. For non-debuggable runtimes we only generate
+ // JIT stubs if there are no AOT stubs for native methods, and since we checked for AOT
+ // code earlier, we must be running JITed code. Debuggable runtimes might have JIT code
+ // even when AOT code is present, but they tag the SP in JITed JNI stubs, and that case
+ // is handled earlier.
CHECK(runtime->GetJit() != nullptr);
code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
CHECK(code != nullptr) << method->PrettyMethod();
diff --git a/runtime/stack.h b/runtime/stack.h
index 1b00b54..bfda57b 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -58,11 +58,6 @@
};
std::ostream& operator<<(std::ostream& os, VRegKind rhs);
-// Size in bytes of the should_deoptimize flag on stack.
-// We just need 4 bytes for our purpose regardless of the architecture. Frame size
-// calculation will automatically do alignment for the final frame size.
-static constexpr size_t kShouldDeoptimizeFlagSize = 4;
-
/*
* Our current stack layout.
* The Dalvik registers come first, followed by the
@@ -306,6 +301,11 @@
return *GetShouldDeoptimizeFlagAddr();
}
+ bool IsShouldDeoptimizeFlagForDebugSet() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t should_deopt_flag = GetShouldDeoptimizeFlag();
+ return (should_deopt_flag & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0;
+ }
+
private:
// Private constructor known in the case that num_frames_ has already been computed.
StackVisitor(Thread* thread,
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 78ba26d..e7c6bec 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -272,6 +272,7 @@
ObjPtr<mirror::Throwable> exception,
bool from_code,
DeoptimizationMethodType method_type) {
+ DCHECK(exception != Thread::GetDeoptimizationException());
DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
return_value,
is_reference,
@@ -1980,6 +1981,9 @@
if (thread->IsStillStarting()) {
os << " (still starting up)";
}
+ if (thread->tls32_.disable_thread_flip_count != 0) {
+ os << " DisableFlipCount = " << thread->tls32_.disable_thread_flip_count;
+ }
os << "\n";
} else {
os << '"' << ::art::GetThreadName(tid) << '"'
@@ -3696,6 +3700,9 @@
ObjPtr<mirror::Throwable> exception = GetException();
CHECK(exception != nullptr);
if (exception == GetDeoptimizationException()) {
+ // This wasn't a real exception, so just clear it here. If there was an actual exception it
+ // will be recorded in the DeoptimizationContext and it will be restored later.
+ ClearException();
artDeoptimize(this);
UNREACHABLE();
}
@@ -4606,7 +4613,9 @@
CHECK(self_->IsExceptionPending()) << *self_;
ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
excp_.Assign(self_->GetException());
- LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+ if (old_suppressed != nullptr) {
+ LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+ }
self_->ClearException();
}
diff --git a/runtime/thread.h b/runtime/thread.h
index dd8b061..7ac4007 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -573,8 +573,8 @@
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
}
- void SetTopOfStackTagged(ArtMethod** top_method) {
- tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
+ void SetTopOfStackGenericJniTagged(ArtMethod** top_method) {
+ tlsPtr_.managed_stack.SetTopQuickFrameGenericJniTagged(top_method);
}
void SetTopOfShadowStack(ShadowFrame* top) {
@@ -766,6 +766,13 @@
OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
}
+ template <PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> DeoptCheckRequiredOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, is_deopt_check_required));
+ }
+
static constexpr size_t IsGcMarkingSize() {
return sizeof(tls32_.is_gc_marking);
}
@@ -1017,6 +1024,10 @@
void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
+ bool IsDeoptCheckRequired() const { return tls32_.is_deopt_check_required; }
+
+ void SetDeoptCheckRequired(bool flag) { tls32_.is_deopt_check_required = flag; }
+
bool GetWeakRefAccessEnabled() const; // Only safe for current thread.
void SetWeakRefAccessEnabled(bool enabled) {
@@ -1712,6 +1723,7 @@
thread_exit_check_count(0),
is_transitioning_to_runnable(false),
is_gc_marking(false),
+ is_deopt_check_required(false),
weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
disable_thread_flip_count(0),
user_code_suspend_count(0),
@@ -1766,6 +1778,12 @@
// GC roots.
bool32_t is_gc_marking;
+ // True if we need to check for deoptimization when returning from runtime functions. This is
+ // required only when a class is redefined, to prevent executing code with stale embedded field
+ // offsets. For non-debuggable apps redefinition is not allowed and this flag should always be
+ // set to false.
+ bool32_t is_deopt_check_required;
+
// Thread "interrupted" status; stays raised until queried or thrown.
Atomic<bool32_t> interrupted;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 6482e72..bfde711 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1291,6 +1291,10 @@
DCHECK_EQ(self, Thread::Current());
CHECK_NE(self->GetState(), ThreadState::kRunnable);
Locks::mutator_lock_->AssertNotHeld(self);
+ if (self->tls32_.disable_thread_flip_count != 0) {
+ LOG(FATAL) << "Incomplete PrimitiveArrayCritical section at exit: " << *self << "count = "
+ << self->tls32_.disable_thread_flip_count;
+ }
VLOG(threads) << "ThreadList::Unregister() " << *self;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index ec61726..6b4fb29 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -757,7 +757,6 @@
}
void Trace::MethodUnwind(Thread* thread,
- Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
diff --git a/runtime/trace.h b/runtime/trace.h
index c6f36e4..ab2fe8f 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -184,7 +184,6 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
override;
void MethodUnwind(Thread* thread,
- Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
diff --git a/test/048-reflect-v8/Android.bp b/test/048-reflect-v8/Android.bp
index 799dcfb..f43f4b0 100644
--- a/test/048-reflect-v8/Android.bp
+++ b/test/048-reflect-v8/Android.bp
@@ -15,12 +15,13 @@
java_test {
name: "art-run-test-048-reflect-v8",
defaults: ["art-run-test-defaults"],
- test_config_template: ":art-run-test-target-template",
+ test_config_template: ":art-run-test-target-cts-template",
srcs: ["src/**/*.java"],
data: [
":art-run-test-048-reflect-v8-expected-stdout",
":art-run-test-048-reflect-v8-expected-stderr",
],
+ test_suites: ["cts"],
}
// Test's expected standard output.
diff --git a/test/048-reflect-v8/test-metadata.json b/test/048-reflect-v8/test-metadata.json
new file mode 100644
index 0000000..975ada7
--- /dev/null
+++ b/test/048-reflect-v8/test-metadata.json
@@ -0,0 +1,3 @@
+{
+ "test_suites": ["cts"]
+}
diff --git a/test/1940-ddms-ext/ddm_ext.cc b/test/1940-ddms-ext/ddm_ext.cc
index 110ad64..6461bdd 100644
--- a/test/1940-ddms-ext/ddm_ext.cc
+++ b/test/1940-ddms-ext/ddm_ext.cc
@@ -21,8 +21,8 @@
// Test infrastructure
#include "jvmti_helper.h"
-#include "nativehelper/scoped_local_ref.h"
-#include "nativehelper/scoped_primitive_array.h"
+#include "scoped_local_ref.h"
+#include "scoped_primitive_array.h"
#include "test_env.h"
namespace art {
diff --git a/test/1940-ddms-ext/src-art/art/Test1940.java b/test/1940-ddms-ext/src-art/art/Test1940.java
index 605e409..7adcd4a 100644
--- a/test/1940-ddms-ext/src-art/art/Test1940.java
+++ b/test/1940-ddms-ext/src-art/art/Test1940.java
@@ -16,6 +16,7 @@
package art;
+import java.lang.reflect.Field;
import org.apache.harmony.dalvik.ddmc.*;
import dalvik.system.VMDebug;
@@ -50,18 +51,21 @@
}
}
- private static boolean chunkEq(Chunk a, Chunk b) {
- return a.type == b.type &&
- a.offset == b.offset &&
- a.length == b.length &&
- Arrays.equals(a.data, b.data);
+ private static boolean chunkEq(Chunk c1, Chunk c2) {
+ ChunkWrapper a = new ChunkWrapper(c1);
+ ChunkWrapper b = new ChunkWrapper(c2);
+ return a.type() == b.type() &&
+ a.offset() == b.offset() &&
+ a.length() == b.length() &&
+ Arrays.equals(a.data(), b.data());
}
private static String printChunk(Chunk k) {
- byte[] out = new byte[k.length];
- System.arraycopy(k.data, k.offset, out, 0, k.length);
+ ChunkWrapper c = new ChunkWrapper(k);
+ byte[] out = new byte[c.length()];
+ System.arraycopy(c.data(), c.offset(), out, 0, c.length());
return String.format("Chunk(Type: 0x%X, Len: %d, data: %s)",
- k.type, k.length, Arrays.toString(out));
+ c.type(), c.length(), Arrays.toString(out));
}
private static final class MyDdmHandler extends ChunkHandler {
@@ -73,7 +77,8 @@
// For this test we will simply calculate the checksum
ByteBuffer b = ByteBuffer.wrap(new byte[8]);
Adler32 a = new Adler32();
- a.update(req.data, req.offset, req.length);
+ ChunkWrapper reqWrapper = new ChunkWrapper(req);
+ a.update(reqWrapper.data(), reqWrapper.offset(), reqWrapper.length());
b.order(ByteOrder.BIG_ENDIAN);
long val = a.getValue();
b.putLong(val);
@@ -92,6 +97,49 @@
}
}
+
+ /**
+ * Wrapper for accessing the hidden fields in {@link Chunk} in CTS.
+ */
+ private static class ChunkWrapper {
+ private Chunk c;
+
+ ChunkWrapper(Chunk c) {
+ this.c = c;
+ }
+
+ int type() {
+ return c.type;
+ }
+
+ int length() {
+ try {
+ Field f = Chunk.class.getField("length");
+ return (int) f.get(c);
+ } catch (NoSuchFieldException | IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ byte[] data() {
+ try {
+ Field f = Chunk.class.getField("data");
+ return (byte[]) f.get(c);
+ } catch (NoSuchFieldException | IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ int offset() {
+ try {
+ Field f = Chunk.class.getField("offset");
+ return (int) f.get(c);
+ } catch (NoSuchFieldException | IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
public static final ChunkHandler SINGLE_HANDLER = new MyDdmHandler();
public static DdmHandler CURRENT_HANDLER;
diff --git a/test/2042-reference-processing/Android.bp b/test/2042-reference-processing/Android.bp
new file mode 100644
index 0000000..a73b8d0
--- /dev/null
+++ b/test/2042-reference-processing/Android.bp
@@ -0,0 +1,40 @@
+// Generated by `regen-test-files`. Do not edit manually.
+
+// Build rules for ART run-test `2042-reference-processing`.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "art_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["art_license"],
+}
+
+// Test's Dex code.
+java_test {
+ name: "art-run-test-2042-reference-processing",
+ defaults: ["art-run-test-defaults"],
+ test_config_template: ":art-run-test-target-template",
+ srcs: ["src/**/*.java"],
+ data: [
+ ":art-run-test-2042-reference-processing-expected-stdout",
+ ":art-run-test-2042-reference-processing-expected-stderr",
+ ],
+}
+
+// Test's expected standard output.
+genrule {
+ name: "art-run-test-2042-reference-processing-expected-stdout",
+ out: ["art-run-test-2042-reference-processing-expected-stdout.txt"],
+ srcs: ["expected-stdout.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
+
+// Test's expected standard error.
+genrule {
+ name: "art-run-test-2042-reference-processing-expected-stderr",
+ out: ["art-run-test-2042-reference-processing-expected-stderr.txt"],
+ srcs: ["expected-stderr.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
diff --git a/test/2042-reference-processing/expected-stderr.txt b/test/2042-reference-processing/expected-stderr.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2042-reference-processing/expected-stderr.txt
diff --git a/test/2042-reference-processing/expected-stdout.txt b/test/2042-reference-processing/expected-stdout.txt
new file mode 100644
index 0000000..8b3e832
--- /dev/null
+++ b/test/2042-reference-processing/expected-stdout.txt
@@ -0,0 +1,2 @@
+Starting
+Finished
diff --git a/test/2042-reference-processing/info.txt b/test/2042-reference-processing/info.txt
new file mode 100644
index 0000000..e2d7a99
--- /dev/null
+++ b/test/2042-reference-processing/info.txt
@@ -0,0 +1,6 @@
+A test for reference processing correctness.
+
+The emphasis here is on fundamental properties. In particular, references to
+unreachable referents should be enqueued, and this should ensure that uncleared
+References don't point to objects for which References were enqueued. We also
+check various other ordering properties for java.lang.ref.References.
diff --git a/test/2042-reference-processing/src/Main.java b/test/2042-reference-processing/src/Main.java
new file mode 100644
index 0000000..ed67052
--- /dev/null
+++ b/test/2042-reference-processing/src/Main.java
@@ -0,0 +1,311 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.ref.PhantomReference;
+import java.lang.ref.Reference;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.WeakReference;
+import java.math.BigInteger;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.HashMap;
+import java.util.TreeMap;
+
+/**
+ * Test that objects get finalized and their references cleared in the right order.
+ *
+ * We maintain a list of nominally MAX_LIVE_OBJS numbered finalizable objects.
+ * We then alternately drop the last 50, and add 50 more. When we see an object finalized
+ * or its reference cleared, we make sure that the preceding objects in its group of 50
+ * have also had their references cleared. We also perform a number of other more
+ * straightforward checks, such as ensuring that all references are eventually cleared,
+ * and all objects are finalized.
+ */
+public class Main {
+ // TODO(b/216481630) Enable CHECK_PHANTOM_REFS. This currently occasionally reports a few
+ // PhantomReferences as not enqueued. If this report is correct, this needs to be tracked
+ // down and fixed.
+ static final boolean CHECK_PHANTOM_REFS = false;
+
+ static final int MAX_LIVE_OBJS = 150;
+ static final int DROP_OBJS = 50; // Number of linked objects dropped in each batch.
+ static final int MIN_LIVE_OBJS = MAX_LIVE_OBJS - DROP_OBJS;
+ static final int TOTAL_OBJS = 200_000; // Allocate this many finalizable objects in total.
+ static final boolean REPORT_DROPS = false;
+ static volatile boolean pleaseStop;
+
+ AtomicInteger totalFinalized = new AtomicInteger(0);
+ Object phantomRefsLock = new Object();
+ int maxDropped = 0;
+ int liveObjects = 0;
+
+ // Number of next finalizable object to be allocated.
+ int nextAllocated = 0;
+
+ // List of finalizable objects in descending order. We add to the front and drop
+ // from the rear.
+ FinalizableObject listHead;
+
+ // A possibly incomplete list of FinalizableObject indices that were finalized, but
+ // have yet to be checked for consistency with reference processing.
+ ArrayBlockingQueue<Integer> finalized = new ArrayBlockingQueue<>(20_000);
+
+ // Maps from object number to Reference; cleared references are deleted when the queues are
+ // processed.
+ TreeMap<Integer, MyWeakReference> weakRefs = new TreeMap<>();
+ HashMap<Integer, MyPhantomReference> phantomRefs = new HashMap<>();
+
+ class FinalizableObject {
+ int n;
+ FinalizableObject next;
+ FinalizableObject(int num, FinalizableObject nextObj) {
+ n = num;
+ next = nextObj;
+ }
+ protected void finalize() {
+ if (!inPhantomRefs(n)) {
+ System.out.println("PhantomRef enqueued before finalizer ran");
+ }
+ totalFinalized.incrementAndGet();
+ if (!finalized.offer(n) && REPORT_DROPS) {
+ System.out.println("Dropped finalization of " + n);
+ }
+ }
+ }
+ ReferenceQueue<FinalizableObject> refQueue = new ReferenceQueue<>();
+ class MyWeakReference extends WeakReference<FinalizableObject> {
+ int n;
+ MyWeakReference(FinalizableObject obj) {
+ super(obj, refQueue);
+ n = obj.n;
+ }
+ };
+ class MyPhantomReference extends PhantomReference<FinalizableObject> {
+ int n;
+ MyPhantomReference(FinalizableObject obj) {
+ super(obj, refQueue);
+ n = obj.n;
+ }
+ }
+ boolean inPhantomRefs(int n) {
+ synchronized(phantomRefsLock) {
+ MyPhantomReference ref = phantomRefs.get(n);
+ if (ref == null) {
+ return false;
+ }
+ if (ref.n != n) {
+ System.out.println("phantomRef retrieval failed");
+ }
+ return true;
+ }
+ }
+
+ void CheckOKToClearWeak(int num) {
+ if (num > maxDropped) {
+ System.out.println("WeakRef to live object " + num + " was cleared/enqueued.");
+ }
+ int batchEnd = (num / DROP_OBJS + 1) * DROP_OBJS;
+ for (MyWeakReference wr : weakRefs.subMap(num + 1, batchEnd).values()) {
+ if (wr.n <= num || wr.n / DROP_OBJS != num / DROP_OBJS) {
+ throw new AssertionError("MyWeakReference logic error!");
+ }
+ // wr referent was dropped in same batch and precedes it in list.
+ if (wr.get() != null) {
+ // This violates the WeakReference spec, and can result in strong references
+ // to objects that have been cleaned.
+ System.out.println("WeakReference to " + wr.n
+ + " was erroneously cleared after " + num);
+ }
+ }
+ }
+
+ void CheckOKToClearPhantom(int num) {
+ if (num > maxDropped) {
+ System.out.println("PhantomRef to live object " + num + " was enqueued.");
+ }
+ MyWeakReference wr = weakRefs.get(num);
+ if (wr != null && wr.get() != null) {
+ System.out.println("PhantomRef cleared before WeakRef for " + num);
+ }
+ }
+
+ void emptyAndCheckQueues() {
+ // Check recently finalized objects for consistency with cleared references.
+ while (true) {
+ Integer num = finalized.poll();
+ if (num == null) {
+ break;
+ }
+ MyWeakReference wr = weakRefs.get(num);
+ if (wr != null) {
+ if (wr.n != num) {
+ System.out.println("Finalization logic error!");
+ }
+ if (wr.get() != null) {
+ System.out.println("Finalizing object with uncleared reference");
+ }
+ }
+ CheckOKToClearWeak(num);
+ }
+ // Check recently enqueued references for consistency.
+ while (true) {
+ Reference<FinalizableObject> ref = (Reference<FinalizableObject>) refQueue.poll();
+ if (ref == null) {
+ break;
+ }
+ if (ref instanceof MyWeakReference) {
+ MyWeakReference wr = (MyWeakReference) ref;
+ if (wr.get() != null) {
+ System.out.println("WeakRef " + wr.n + " enqueued but not cleared");
+ }
+ CheckOKToClearWeak(wr.n);
+ if (weakRefs.remove(Integer.valueOf(wr.n)) != ref) {
+ System.out.println("Missing WeakReference: " + wr.n);
+ }
+ } else if (ref instanceof MyPhantomReference) {
+ MyPhantomReference pr = (MyPhantomReference) ref;
+ CheckOKToClearPhantom(pr.n);
+ if (phantomRefs.remove(Integer.valueOf(pr.n)) != ref) {
+ System.out.println("Missing PhantomReference: " + pr.n);
+ }
+ } else {
+ System.out.println("Found unrecognized reference in queue");
+ }
+ }
+ }
+
+
+ /**
+ * Add n objects to the head of the list. These will be assigned the next n consecutive
+ * numbers after the current head of the list.
+ */
+ void addObjects(int n) {
+ for (int i = 0; i < n; ++i) {
+ int me = nextAllocated++;
+ listHead = new FinalizableObject(me, listHead);
+ weakRefs.put(me, new MyWeakReference(listHead));
+ synchronized(phantomRefsLock) {
+ phantomRefs.put(me, new MyPhantomReference(listHead));
+ }
+ }
+ liveObjects += n;
+ }
+
+ /**
+ * Drop n finalizable objects from the tail of the list. These are the lowest-numbered objects
+ * in the list.
+ */
+ void dropObjects(int n) {
+ FinalizableObject list = listHead;
+ FinalizableObject last = null;
+ if (n > liveObjects) {
+ System.out.println("Removing too many elements");
+ }
+ if (liveObjects == n) {
+ maxDropped = list.n;
+ listHead = null;
+ } else {
+ final int skip = liveObjects - n;
+ for (int i = 0; i < skip; ++i) {
+ last = list;
+ list = list.next;
+ }
+ int expected = nextAllocated - skip - 1;
+ if (list.n != expected) {
+ System.out.println("dropObjects found " + list.n + " but expected " + expected);
+ }
+ maxDropped = expected;
+ last.next = null;
+ }
+ liveObjects -= n;
+ }
+
+ void testLoop() {
+ System.out.println("Starting");
+ addObjects(MIN_LIVE_OBJS);
+ final int ITERS = (TOTAL_OBJS - MIN_LIVE_OBJS) / DROP_OBJS;
+ for (int i = 0; i < ITERS; ++i) {
+ addObjects(DROP_OBJS);
+ if (liveObjects != MAX_LIVE_OBJS) {
+ System.out.println("Unexpected live object count");
+ }
+ dropObjects(DROP_OBJS);
+ emptyAndCheckQueues();
+ }
+ dropObjects(MIN_LIVE_OBJS);
+ if (liveObjects != 0 || listHead != null) {
+ System.out.println("Unexpected live objecs at end");
+ }
+ if (maxDropped != TOTAL_OBJS - 1) {
+ System.out.println("Unexpected dropped object count: " + maxDropped);
+ }
+ for (int i = 0; i < 2; ++i) {
+ Runtime.getRuntime().gc();
+ System.runFinalization();
+ emptyAndCheckQueues();
+ }
+ if (!weakRefs.isEmpty()) {
+ System.out.println("Weak Reference map nonempty size = " + weakRefs.size());
+ }
+ if (CHECK_PHANTOM_REFS && !phantomRefs.isEmpty()) {
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException e) {
+ System.out.println("Unexpected interrupt");
+ }
+ if (!phantomRefs.isEmpty()) {
+ System.out.println("Phantom Reference map nonempty size = " + phantomRefs.size());
+ System.out.print("First elements:");
+ int i = 0;
+ for (MyPhantomReference pr : phantomRefs.values()) {
+ System.out.print(" " + pr.n);
+ if (++i > 10) {
+ break;
+ }
+ }
+ System.out.println("");
+ }
+ }
+ if (totalFinalized.get() != TOTAL_OBJS) {
+ System.out.println("Finalized only " + totalFinalized + " objects");
+ }
+ }
+
+ static Runnable causeGCs = new Runnable() {
+ public void run() {
+ // Allocate a lot.
+ BigInteger counter = BigInteger.ZERO;
+ while (!pleaseStop) {
+ counter = counter.add(BigInteger.TEN);
+ }
+ // Look at counter to reduce chance of optimizing out the allocation.
+ if (counter.longValue() % 10 != 0) {
+ System.out.println("Bad causeGCs counter value: " + counter);
+ }
+ }
+ };
+
+ public static void main(String[] args) throws Exception {
+ Main theTest = new Main();
+ Thread gcThread = new Thread(causeGCs);
+ gcThread.setDaemon(true); // Terminate if main thread dies.
+ gcThread.start();
+ theTest.testLoop();
+ pleaseStop = true;
+ gcThread.join();
+ System.out.println("Finished");
+ }
+}
diff --git a/test/2043-reference-pauses/Android.bp b/test/2043-reference-pauses/Android.bp
new file mode 100644
index 0000000..a84aea2
--- /dev/null
+++ b/test/2043-reference-pauses/Android.bp
@@ -0,0 +1,40 @@
+// Generated by `regen-test-files`. Do not edit manually.
+
+// Build rules for ART run-test `2043-reference-pauses`.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "art_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["art_license"],
+}
+
+// Test's Dex code.
+java_test {
+ name: "art-run-test-2043-reference-pauses",
+ defaults: ["art-run-test-defaults"],
+ test_config_template: ":art-run-test-target-template",
+ srcs: ["src/**/*.java"],
+ data: [
+ ":art-run-test-2043-reference-pauses-expected-stdout",
+ ":art-run-test-2043-reference-pauses-expected-stderr",
+ ],
+}
+
+// Test's expected standard output.
+genrule {
+ name: "art-run-test-2043-reference-pauses-expected-stdout",
+ out: ["art-run-test-2043-reference-pauses-expected-stdout.txt"],
+ srcs: ["expected-stdout.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
+
+// Test's expected standard error.
+genrule {
+ name: "art-run-test-2043-reference-pauses-expected-stderr",
+ out: ["art-run-test-2043-reference-pauses-expected-stderr.txt"],
+ srcs: ["expected-stderr.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
diff --git a/test/2043-reference-pauses/expected-stderr.txt b/test/2043-reference-pauses/expected-stderr.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2043-reference-pauses/expected-stderr.txt
diff --git a/test/2043-reference-pauses/expected-stdout.txt b/test/2043-reference-pauses/expected-stdout.txt
new file mode 100644
index 0000000..8b3e832
--- /dev/null
+++ b/test/2043-reference-pauses/expected-stdout.txt
@@ -0,0 +1,2 @@
+Starting
+Finished
diff --git a/test/2043-reference-pauses/info.txt b/test/2043-reference-pauses/info.txt
new file mode 100644
index 0000000..f76fa32
--- /dev/null
+++ b/test/2043-reference-pauses/info.txt
@@ -0,0 +1,5 @@
+Tests WeakReference processing and retention of objects needed by finalizers.
+
+Can be used as a Reference.get() blocking benchmark by setting PRINT_TIMES to
+true. This prints the maximum observed latencies for Reference.get() when
+significant memory is reachable only from SoftReferences and finalizers.
diff --git a/test/2043-reference-pauses/src/Main.java b/test/2043-reference-pauses/src/Main.java
new file mode 100644
index 0000000..dc64d9a
--- /dev/null
+++ b/test/2043-reference-pauses/src/Main.java
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.ref.Reference;
+import java.lang.ref.WeakReference;
+import java.lang.ref.SoftReference;
+import java.math.BigInteger;
+import java.util.ArrayList;
+
+/**
+ * Basic test of WeakReferences with large amounts of memory that's only reachable through
+ * finalizers. Also makes sure that finalizer-reachable data is not collected.
+ * Can easily be modified to time Reference.get() blocking.
+ */
+public class Main {
+ static final boolean PRINT_TIMES = false; // true will cause benchmark failure.
+ // Data structures repeatedly allocated in background to trigger GC.
+ // Size of finalizer reachable trees.
+ static final int TREE_HEIGHT = 15; // Trees contain 2^TREE_HEIGHT -1 allocated objects.
+ // Number of finalizable tree-owning objects that exist at one point.
+ static final int N_RESURRECTING_OBJECTS = 10;
+ // Number of short-lived, not finalizer-reachable, objects allocated between trees.
+ static final int N_PLAIN_OBJECTS = 20_000;
+ // Number of SoftReferences to CBTs we allocate.
+ static final int N_SOFTREFS = 10;
+
+ static final boolean BACKGROUND_GC_THREAD = true;
+ static final int NBATCHES = 10;
+ static final int NREFS = PRINT_TIMES ? 1_000_000 : 300_000; // Multiple of NBATCHES.
+ static final int REFS_PER_BATCH = NREFS / NBATCHES;
+
+ static volatile boolean pleaseStop = false;
+
+ // Large array of WeakReferences filled and accessed by tests below.
+ ArrayList<WeakReference<Integer>> weakRefs = new ArrayList<>(NREFS);
+
+ /**
+ * Complete binary tree data structure. make(n) takes O(2^n) space.
+ */
+ static class CBT {
+ CBT left;
+ CBT right;
+ CBT(CBT l, CBT r) {
+ left = l;
+ right = r;
+ }
+ static CBT make(int n) {
+ if (n == 0) {
+ return null;
+ }
+ return new CBT(make(n - 1), make(n - 1));
+ }
+ /**
+ * Check that path described by bit-vector path has the correct length.
+ */
+ void check(int n, int path) {
+ CBT current = this;
+ for (int i = 0; i < n; i++, path = path >>> 1) {
+ // Unexpectedly short paths result in NPE.
+ if ((path & 1) == 0) {
+ current = current.left;
+ } else {
+ current = current.right;
+ }
+ }
+ if (current != null) {
+ System.out.println("Complete binary tree path too long");
+ }
+ }
+ }
+
+
+ /**
+ * A finalizable object that refers to O(2^TREE_HEIGHT) otherwise unreachable memory.
+ * When finalized, it creates a new identical object, making sure that one always stays
+ * around.
+ */
+ static class ResurrectingObject {
+ CBT stuff;
+ ResurrectingObject() {
+ stuff = CBT.make(TREE_HEIGHT);
+ }
+ static ResurrectingObject a[] = new ResurrectingObject[2];
+ static int i = 0;
+ static synchronized void allocOne() {
+ a[(++i) % 2] = new ResurrectingObject();
+ // Check the previous one to make it hard to optimize anything out.
+ if (i > 1) {
+ a[(i + 1) % 2].stuff.check(TREE_HEIGHT, i /* weirdly interpreted as path */);
+ }
+ }
+ protected void finalize() {
+ stuff.check(TREE_HEIGHT, 42 /* Some path descriptor */);
+ // Allocate a new one to replace this one.
+ allocOne();
+ }
+ }
+
+ void fillWeakRefs() {
+ for (int i = 0; i < NREFS; ++i) {
+ weakRefs.add(null);
+ }
+ }
+
+ /*
+ * Return maximum observed time in nanos to dereference a WeakReference to an unreachable
+ * object. weakRefs is presumed to be pre-filled to have the correct size.
+ */
+ long timeUnreachableInner() {
+ long maxNanos = 0;
+ // Fill weakRefs with WeakReferences to unreachable integers, a batch at a time.
+ // Then time and test .get() calls on carefully sampled array entries, some of which
+ // will have been cleared.
+ for (int i = 0; i < NBATCHES; ++i) {
+ for (int j = 0; j < REFS_PER_BATCH; ++j) {
+ weakRefs.set(i * REFS_PER_BATCH + j,
+ new WeakReference(new Integer(i * REFS_PER_BATCH + j)));
+ }
+ try {
+ Thread.sleep(50);
+ } catch (InterruptedException e) {
+ System.out.println("Unexpected exception");
+ }
+ // Iterate over the filled-in section of weakRefs, but look only at a subset of the
+ // elements, making sure the subsets for different top-level iterations are disjoint.
+ // Otherwise the get() calls here will extend the lifetimes of the referents, and we
+ // may never see any cleared WeakReferences.
+ for (int j = (i + 1) * REFS_PER_BATCH - i - 1; j >= 0; j -= NBATCHES) {
+ WeakReference<Integer> wr = weakRefs.get(j);
+ if (wr != null) {
+ long startNanos = System.nanoTime();
+ Integer referent = wr.get();
+ long totalNanos = System.nanoTime() - startNanos;
+ if (referent == null) {
+ // Optimization to reduce max space use and scanning time.
+ weakRefs.set(j, null);
+ }
+ maxNanos = Math.max(maxNanos, totalNanos);
+ if (referent != null && referent.intValue() != j) {
+ System.out.println("Unexpected referent; expected " + j + " got "
+ + referent.intValue());
+ }
+ }
+ }
+ }
+ return maxNanos;
+ }
+
+ /*
+ * Wrapper for the above that also checks that references were reclaimed.
+ * We do this separately to make sure any stack references from the core of the
+ * test are gone. Empirically, we otherwise sometimes see the zeroth WeakReference
+ * not reclaimed.
+ */
+ long timeUnreachable() {
+ long maxNanos = timeUnreachableInner();
+ Runtime.getRuntime().gc();
+ System.runFinalization(); // Presumed to wait for reference clearing.
+ for (int i = 0; i < NREFS; ++i) {
+ if (weakRefs.get(i) != null && weakRefs.get(i).get() != null) {
+ System.out.println("WeakReference to " + i + " wasn't cleared");
+ }
+ }
+ return maxNanos;
+ }
+
+ /**
+ * Return maximum observed time in nanos to dereference a WeakReference to a reachable
+ * object. Overwrites weakRefs, which is presumed to have NREFS entries already.
+ */
+ long timeReachable() {
+ long maxNanos = 0;
+ // Similar to the above, but we use WeakReferences to otherwise reachable objects,
+ // which should thus not get cleared.
+ Integer[] strongRefs = new Integer[NREFS];
+ for (int i = 0; i < NBATCHES; ++i) {
+ for (int j = i * REFS_PER_BATCH; j < (i + 1) * NREFS / NBATCHES; ++j) {
+ Integer newObj = new Integer(j);
+ strongRefs[j] = newObj;
+ weakRefs.set(j, new WeakReference(newObj));
+ }
+ for (int j = (i + 1) * REFS_PER_BATCH - 1; j >= 0; --j) {
+ WeakReference<Integer> wr = weakRefs.get(j);
+ long startNanos = System.nanoTime();
+ Integer referent = wr.get();
+ long totalNanos = System.nanoTime() - startNanos;
+ maxNanos = Math.max(maxNanos, totalNanos);
+ if (referent == null) {
+ System.out.println("Unexpectedly cleared referent at " + j);
+ } else if (referent.intValue() != j) {
+ System.out.println("Unexpected reachable referent; expected " + j + " got "
+ + referent.intValue());
+ }
+ }
+ }
+ Reference.reachabilityFence(strongRefs);
+ return maxNanos;
+ }
+
+ void runTest() {
+ System.out.println("Starting");
+ fillWeakRefs();
+ long unreachableNanos = timeUnreachable();
+ if (PRINT_TIMES) {
+ System.out.println("Finished timeUnrechable()");
+ }
+ long reachableNanos = timeReachable();
+ String unreachableMillis =
+ String. format("%,.3f", ((double) unreachableNanos) / 1_000_000);
+ String reachableMillis =
+ String. format("%,.3f", ((double) reachableNanos) / 1_000_000);
+ if (PRINT_TIMES) {
+ System.out.println(
+ "Max time for WeakReference.get (unreachable): " + unreachableMillis);
+ System.out.println(
+ "Max time for WeakReference.get (reachable): " + reachableMillis);
+ }
+ // Only report extremely egregious pauses to avoid spurious failures.
+ if (unreachableNanos > 10_000_000_000L) {
+ System.out.println("WeakReference.get (unreachable) time unreasonably long");
+ }
+ if (reachableNanos > 10_000_000_000L) {
+ System.out.println("WeakReference.get (reachable) time unreasonably long");
+ }
+ }
+
+ /**
+ * Allocate and GC a lot, while keeping significant amounts of finalizer and
+ * SoftReference-reachable memory around.
+ */
+ static Runnable allocFinalizable = new Runnable() {
+ public void run() {
+ // Allocate and drop some finalizable objects that take a long time
+ // to mark. Designed to be hard to optimize away. Each of these objects will
+ // build a new one in its finalizer before really going away.
+ ArrayList<SoftReference<CBT>> softRefs = new ArrayList<>(N_SOFTREFS);
+ for (int i = 0; i < N_SOFTREFS; ++i) {
+ // These should not normally get reclaimed, since we shouldn't run out of
+ // memory. They do increase tracing time.
+ softRefs.add(new SoftReference(CBT.make(TREE_HEIGHT)));
+ }
+ for (int i = 0; i < N_RESURRECTING_OBJECTS; ++i) {
+ ResurrectingObject.allocOne();
+ }
+ BigInteger counter = BigInteger.ZERO;
+ for (int i = 1; !pleaseStop; ++i) {
+ // Allocate a lot of short-lived objects, using BigIntegers to minimize the chance
+ // of the allocation getting optimized out. This makes things slightly more
+ // realistic, since not all objects will be finalizer reachable.
+ for (int j = 0; j < N_PLAIN_OBJECTS / 2; ++j) {
+ counter = counter.add(BigInteger.TEN);
+ }
+ // Look at counter to reduce chance of optimizing out the allocation.
+ if (counter.longValue() % 10 != 0) {
+ System.out.println("Bad allocFinalizable counter value: " + counter);
+ }
+ // Explicitly collect here, mostly to prevent heap growth. Otherwise we get
+ // ahead of the GC and eventually block on it.
+ Runtime.getRuntime().gc();
+ if (PRINT_TIMES && i % 100 == 0) {
+ System.out.println("Collected " + i + " times");
+ }
+ }
+ // To be safe, access softRefs.
+ final CBT sample = softRefs.get(N_SOFTREFS / 2).get();
+ if (sample != null) {
+ sample.check(TREE_HEIGHT, 47 /* some path descriptor */);
+ }
+ }
+ };
+
+ public static void main(String[] args) throws Exception {
+ Main theTest = new Main();
+ Thread allocThread = null;
+ if (BACKGROUND_GC_THREAD) {
+ allocThread = new Thread(allocFinalizable);
+ allocThread.setDaemon(true); // Terminate if main thread dies.
+ allocThread.start();
+ }
+ theTest.runTest();
+ if (BACKGROUND_GC_THREAD) {
+ pleaseStop = true;
+ allocThread.join();
+ }
+ System.out.println("Finished");
+ }
+}
diff --git a/test/597-deopt-busy-loop/src/FloatLoop.java b/test/597-deopt-busy-loop/src/FloatLoop.java
index 57667a6..7aadce4 100644
--- a/test/597-deopt-busy-loop/src/FloatLoop.java
+++ b/test/597-deopt-busy-loop/src/FloatLoop.java
@@ -51,6 +51,10 @@
}
}
+ // Create an empty int[] to force loading the int[] class before compiling $noinline$busyLoop.
+ // This makes sure the compiler can properly type int[] and not bail.
+ static int[] emptyArray = new int[0];
+
public void $noinline$busyLoop() {
Main.assertIsManaged();
diff --git a/test/719-varhandle-concurrency/src/Main.java b/test/719-varhandle-concurrency/src/Main.java
index 1c2135b..f89a0bb 100644
--- a/test/719-varhandle-concurrency/src/Main.java
+++ b/test/719-varhandle-concurrency/src/Main.java
@@ -39,7 +39,7 @@
}
private static final int TASK_COUNT = 10000;
- private static final int THREAD_COUNT = 100;
+ private static final int THREAD_COUNT = 20;
/* Each test may need several retries before a concurrent failure is seen. In the past, for a
* known bug, between 5 and 10 retries were sufficient. Use RETRIES to configure how many
* iterations to retry for each test scenario. However, to avoid the test running for too long,
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 28a737d..98ea906 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -191,6 +191,12 @@
return 0;
}
+ // Ignore system classes, which may come from the JIT compiling a method
+ // in these classes.
+ if (reference_kind == JVMTI_HEAP_REFERENCE_SYSTEM_CLASS) {
+ return 0;
+ }
+
// Only check tagged objects.
if (tag == 0) {
return JVMTI_VISIT_OBJECTS;
diff --git a/test/988-method-trace/expected-stdout.txt b/test/988-method-trace/expected-stdout.txt
index 59077b9..c5ab4ed 100644
--- a/test/988-method-trace/expected-stdout.txt
+++ b/test/988-method-trace/expected-stdout.txt
@@ -13,13 +13,19 @@
..=> public java.lang.Object()
..<= public java.lang.Object() -> <null: null>
.<= public art.Test988$FibResult(java.lang.String,int,int) -> <null: null>
-.=> public boolean java.util.ArrayList.add(java.lang.Object)
-..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+.=> static void art.Test988.addToResults(art.Test988$Printable)
+..=> public void java.util.ArrayList.ensureCapacity(int)
...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
-..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+..<= public void java.util.ArrayList.ensureCapacity(int) -> <null: null>
+..=> public boolean java.util.ArrayList.add(java.lang.Object)
+...=> private void java.util.ArrayList.ensureCapacityInternal(int)
+....=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+....<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+...<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(30)=832040
-.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+..<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+.<= static void art.Test988.addToResults(art.Test988$Printable) -> <null: null>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> art.Test988$RecurOp()
.=> public java.lang.Object()
@@ -62,13 +68,19 @@
..=> public java.lang.Object()
..<= public java.lang.Object() -> <null: null>
.<= public art.Test988$FibResult(java.lang.String,int,int) -> <null: null>
-.=> public boolean java.util.ArrayList.add(java.lang.Object)
-..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+.=> static void art.Test988.addToResults(art.Test988$Printable)
+..=> public void java.util.ArrayList.ensureCapacity(int)
...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
-..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+..<= public void java.util.ArrayList.ensureCapacity(int) -> <null: null>
+..=> public boolean java.util.ArrayList.add(java.lang.Object)
+...=> private void java.util.ArrayList.ensureCapacityInternal(int)
+....=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+....<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+...<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(5)=5
-.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+..<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+.<= static void art.Test988.addToResults(art.Test988$Printable) -> <null: null>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> art.Test988$NativeOp()
.=> public java.lang.Object()
@@ -83,13 +95,19 @@
..=> public java.lang.Object()
..<= public java.lang.Object() -> <null: null>
.<= public art.Test988$FibResult(java.lang.String,int,int) -> <null: null>
-.=> public boolean java.util.ArrayList.add(java.lang.Object)
-..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+.=> static void art.Test988.addToResults(art.Test988$Printable)
+..=> public void java.util.ArrayList.ensureCapacity(int)
...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
-..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+..<= public void java.util.ArrayList.ensureCapacity(int) -> <null: null>
+..=> public boolean java.util.ArrayList.add(java.lang.Object)
+...=> private void java.util.ArrayList.ensureCapacityInternal(int)
+....=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+....<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+...<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(5)=5
-.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+..<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+.<= static void art.Test988.addToResults(art.Test988$Printable) -> <null: null>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> art.Test988$IterOp()
.=> public java.lang.Object()
@@ -170,10 +188,10 @@
......=> private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
......<= private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
- art.Test988.iter_fibonacci(Test988.java:269)
- art.Test988$IterOp.applyAsInt(Test988.java:264)
- art.Test988.doFibTest(Test988.java:402)
- art.Test988.run(Test988.java:358)
+ art.Test988.iter_fibonacci(Test988.java:280)
+ art.Test988$IterOp.applyAsInt(Test988.java:275)
+ art.Test988.doFibTest(Test988.java:413)
+ art.Test988.run(Test988.java:369)
<additional hidden frames>
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -184,19 +202,25 @@
..=> public java.lang.Object()
..<= public java.lang.Object() -> <null: null>
.<= public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable) -> <null: null>
-.=> public boolean java.util.ArrayList.add(java.lang.Object)
-..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+.=> static void art.Test988.addToResults(art.Test988$Printable)
+..=> public void java.util.ArrayList.ensureCapacity(int)
...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
-..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+..<= public void java.util.ArrayList.ensureCapacity(int) -> <null: null>
+..=> public boolean java.util.ArrayList.add(java.lang.Object)
+...=> private void java.util.ArrayList.ensureCapacityInternal(int)
+....=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+....<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+...<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
- art.Test988.iter_fibonacci(Test988.java:269)
- art.Test988$IterOp.applyAsInt(Test988.java:264)
- art.Test988.doFibTest(Test988.java:402)
- art.Test988.run(Test988.java:358)
+ art.Test988.iter_fibonacci(Test988.java:280)
+ art.Test988$IterOp.applyAsInt(Test988.java:275)
+ art.Test988.doFibTest(Test988.java:413)
+ art.Test988.run(Test988.java:369)
<additional hidden frames>
-.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+..<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+.<= static void art.Test988.addToResults(art.Test988$Printable) -> <null: null>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> art.Test988$RecurOp()
.=> public java.lang.Object()
@@ -277,10 +301,10 @@
......=> private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
......<= private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
- art.Test988.fibonacci(Test988.java:291)
- art.Test988$RecurOp.applyAsInt(Test988.java:286)
- art.Test988.doFibTest(Test988.java:402)
- art.Test988.run(Test988.java:359)
+ art.Test988.fibonacci(Test988.java:302)
+ art.Test988$RecurOp.applyAsInt(Test988.java:297)
+ art.Test988.doFibTest(Test988.java:413)
+ art.Test988.run(Test988.java:370)
<additional hidden frames>
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -291,19 +315,25 @@
..=> public java.lang.Object()
..<= public java.lang.Object() -> <null: null>
.<= public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable) -> <null: null>
-.=> public boolean java.util.ArrayList.add(java.lang.Object)
-..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+.=> static void art.Test988.addToResults(art.Test988$Printable)
+..=> public void java.util.ArrayList.ensureCapacity(int)
...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
-..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+..<= public void java.util.ArrayList.ensureCapacity(int) -> <null: null>
+..=> public boolean java.util.ArrayList.add(java.lang.Object)
+...=> private void java.util.ArrayList.ensureCapacityInternal(int)
+....=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+....<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+...<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
- art.Test988.fibonacci(Test988.java:291)
- art.Test988$RecurOp.applyAsInt(Test988.java:286)
- art.Test988.doFibTest(Test988.java:402)
- art.Test988.run(Test988.java:359)
+ art.Test988.fibonacci(Test988.java:302)
+ art.Test988$RecurOp.applyAsInt(Test988.java:297)
+ art.Test988.doFibTest(Test988.java:413)
+ art.Test988.run(Test988.java:370)
<additional hidden frames>
-.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+..<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+.<= static void art.Test988.addToResults(art.Test988$Printable) -> <null: null>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> art.Test988$NativeOp()
.=> public java.lang.Object()
@@ -323,9 +353,9 @@
......<= private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: bad argument
art.Test988.nativeFibonacci(Native Method)
- art.Test988$NativeOp.applyAsInt(Test988.java:301)
- art.Test988.doFibTest(Test988.java:402)
- art.Test988.run(Test988.java:360)
+ art.Test988$NativeOp.applyAsInt(Test988.java:312)
+ art.Test988.doFibTest(Test988.java:413)
+ art.Test988.run(Test988.java:371)
<additional hidden frames>
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -336,19 +366,25 @@
..=> public java.lang.Object()
..<= public java.lang.Object() -> <null: null>
.<= public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable) -> <null: null>
-.=> public boolean java.util.ArrayList.add(java.lang.Object)
-..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+.=> static void art.Test988.addToResults(art.Test988$Printable)
+..=> public void java.util.ArrayList.ensureCapacity(int)
...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
-..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+..<= public void java.util.ArrayList.ensureCapacity(int) -> <null: null>
+..=> public boolean java.util.ArrayList.add(java.lang.Object)
+...=> private void java.util.ArrayList.ensureCapacityInternal(int)
+....=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+....<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+...<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
fibonacci(-19) -> java.lang.Error: bad argument
art.Test988.nativeFibonacci(Native Method)
- art.Test988$NativeOp.applyAsInt(Test988.java:301)
- art.Test988.doFibTest(Test988.java:402)
- art.Test988.run(Test988.java:360)
+ art.Test988$NativeOp.applyAsInt(Test988.java:312)
+ art.Test988.doFibTest(Test988.java:413)
+ art.Test988.run(Test988.java:371)
<additional hidden frames>
-.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+..<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+.<= static void art.Test988.addToResults(art.Test988$Printable) -> <null: null>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> public final void <non-deterministic-type 0>.run()
.=> private static java.lang.Object java.lang.reflect.Proxy.invoke(java.lang.reflect.Proxy,java.lang.reflect.Method,java.lang.Object[]) throws java.lang.Throwable
diff --git a/test/988-method-trace/src/art/Test988.java b/test/988-method-trace/src/art/Test988.java
index 9c8ce4a..227ce62 100644
--- a/test/988-method-trace/src/art/Test988.java
+++ b/test/988-method-trace/src/art/Test988.java
@@ -253,11 +253,22 @@
}
}
- private static List<Printable> results = new ArrayList<>();
+ private static ArrayList<Printable> results = new ArrayList<>();
+ private static int results_index = 0;
// Starts with => enableMethodTracing
// .=> enableTracing
private static int cnt = 2;
+ static void addToResults(Printable obj) {
+ // Reserve space for the current object. If any other method entry callbacks are called they
+ // will reserve more space. Without this we may get into strange problems where ArrayList::add
+ // checks there is enough space (which involves a couple of method calls) which then use up the
+ // space and by the time we actually add this record there is no capacity left.
+ results_index++;
+ results.ensureCapacity(results_index + 1);
+ results.add(obj);
+ }
+
// Iterative version
static final class IterOp implements IntUnaryOperator {
public int applyAsInt(int x) {
@@ -319,7 +330,7 @@
if ((cnt - 1) > METHOD_TRACING_IGNORE_DEPTH && sMethodTracingIgnore) {
return;
}
- results.add(new MethodEntry(m, cnt - 1));
+ addToResults(new MethodEntry(m, cnt - 1));
}
public static void notifyMethodExit(Executable m, boolean exception, Object result) {
@@ -330,9 +341,9 @@
}
if (exception) {
- results.add(new MethodThrownThrough(m, cnt));
+ addToResults(new MethodThrownThrough(m, cnt));
} else {
- results.add(new MethodReturn(m, result, cnt));
+ addToResults(new MethodReturn(m, result, cnt));
}
}
@@ -400,9 +411,9 @@
public static void doFibTest(int x, IntUnaryOperator op) {
try {
int y = op.applyAsInt(x);
- results.add(new FibResult("fibonacci(%d)=%d\n", x, y));
+ addToResults(new FibResult("fibonacci(%d)=%d\n", x, y));
} catch (Throwable t) {
- results.add(new FibThrow("fibonacci(%d) -> %s\n", x, t));
+ addToResults(new FibThrow("fibonacci(%d) -> %s\n", x, t));
}
}
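Aside: the reservation pattern in the new addToResults() helper can be summarized outside the diff. The Python sketch below only mirrors the structure of the Java helper (reserve an index, grow capacity, then add) so the ordering is visible at a glance; the FixedCapacityList type and its names are illustrative and not part of the test.

import threading  # not required; shown only to stress this sketch is standalone

class FixedCapacityList:
    """Toy stand-in for ArrayList with an explicit, manually grown capacity."""
    def __init__(self):
        self.items = []
        self.capacity = 0

    def ensure_capacity(self, n):
        # In the real test this growth happens before the traced add(), so the
        # method-entry callbacks it triggers cannot exhaust the reserved space.
        self.capacity = max(self.capacity, n)

    def add(self, item):
        if len(self.items) >= self.capacity:
            raise RuntimeError("add() would need to grow: capacity not reserved")
        self.items.append(item)

results = FixedCapacityList()
results_index = 0

def add_to_results(record):
    """Mirror of Test988.addToResults(): reserve space first, then append."""
    global results_index
    results_index += 1
    results.ensure_capacity(results_index + 1)
    results.add(record)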
diff --git a/test/Android.bp b/test/Android.bp
index 5bbd45a..c0a363e 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -469,7 +469,7 @@
],
}
-// Same as above, but with a longer timeout.
+// Same as `art-run-test-target-template`, but with a longer timeout.
filegroup {
name: "art-run-test-target-slow-template",
srcs: [
@@ -477,6 +477,15 @@
],
}
+// Same as `art-run-test-target-template`, but contains additional
+// options to also make the test part of ART CTS.
+filegroup {
+ name: "art-run-test-target-cts-template",
+ srcs: [
+ "art-run-test-target-cts-template.xml",
+ ],
+}
+
// Test configuration template for ART run-tests on target expected to run
// successfully, and as such *not* tagged as part of TradeFed's
// `art-target-run-test` test suite via the `test-suite-tag`. This is as
@@ -680,6 +689,7 @@
"1932-monitor-events-misc/monitor_misc.cc",
"1934-jvmti-signal-thread/signal_threads.cc",
"1939-proxy-frames/local_instance.cc",
+ "1940-ddms-ext/ddm_ext.cc",
"1941-dispose-stress/dispose_stress.cc",
"1942-suspend-raw-monitor-exit/native_suspend_monitor.cc",
"1943-suspend-raw-monitor-wait/native_suspend_monitor.cc",
@@ -727,7 +737,6 @@
"936-search-onload/search_onload.cc",
"980-redefine-object/redef_object.cc",
"983-source-transform-verify/source_transform_art.cc",
- "1940-ddms-ext/ddm_ext.cc",
// "1952-pop-frame-jit/pop_frame.cc",
"1959-redefine-object-instrument/fake_redef_object.cc",
"1960-obsolete-jit-multithread-native/native_say_hi.cc",
@@ -1066,6 +1075,7 @@
"1936-thread-end-events/src/art/Test1936.java",
"1937-transform-soft-fail/src/art/Test1937.java",
"1939-proxy-frames/src/art/Test1939.java",
+ "1940-ddms-ext/src-art/art/Test1940.java",
"1941-dispose-stress/src/art/Test1941.java",
"1942-suspend-raw-monitor-exit/src/art/Test1942.java",
"1943-suspend-raw-monitor-wait/src/art/Test1943.java",
@@ -1211,6 +1221,7 @@
"1936-thread-end-events/expected-stdout.txt",
"1937-transform-soft-fail/expected-stdout.txt",
"1939-proxy-frames/expected-stdout.txt",
+ "1940-ddms-ext/expected-stdout.txt",
"1941-dispose-stress/expected-stdout.txt",
"1942-suspend-raw-monitor-exit/expected-stdout.txt",
"1943-suspend-raw-monitor-wait/expected-stdout.txt",
diff --git a/test/art-run-test-target-cts-template.xml b/test/art-run-test-target-cts-template.xml
new file mode 100644
index 0000000..4ce885c
--- /dev/null
+++ b/test/art-run-test-target-cts-template.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2022 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Note: This test config for {MODULE} is generated from a template. -->
+<configuration description="Test module config for {MODULE}">
+ <option name="test-suite-tag" value="art-target-run-test" />
+ <option name="config-descriptor:metadata" key="mainline-param" value="com.google.android.art.apex" />
+
+ <option name="test-suite-tag" value="cts" />
+ <option name="config-descriptor:metadata" key="component" value="art" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_instant_app" />
+ <option name="config-descriptor:metadata" key="parameter" value="multi_abi" />
+ <option name="config-descriptor:metadata" key="parameter" value="secondary_user" />
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <!-- TODO: Use option `push-file` instead of deprecated option
+ `push`. -->
+ <option name="push" value="{MODULE}.jar->/data/local/tmp/{MODULE}/{MODULE}.jar" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.ArtRunTest">
+ <option name="run-test-name" value="{MODULE}" />
+ <option name="classpath" value="/data/local/tmp/{MODULE}/{MODULE}.jar" />
+ </test>
+
+ <!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
+ one of the Mainline modules below is present on the device used for testing. -->
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.MainlineTestModuleController">
+ <!-- ART Mainline Module (internal version). -->
+ <option name="mainline-module-package-name" value="com.google.android.art" />
+ <!-- ART Mainline Module (external (AOSP) version). -->
+ <option name="mainline-module-package-name" value="com.android.art" />
+ </object>
+
+ <!-- Only run tests if the device under test is SDK version 31 (Android 12) or above. -->
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.Sdk31ModuleController" />
+</configuration>
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 0b72612..f39bca1 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -276,7 +276,20 @@
// this before checking if we will execute JIT code in case the request
// is for an 'optimized' compilation.
jit->CompileMethod(method, self, kind, /*prejit=*/ false);
- } while (!code_cache->ContainsPc(method->GetEntryPointFromQuickCompiledCode()));
+ const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
+ if (code_cache->ContainsPc(entry_point)) {
+ // If we're running baseline or not requesting optimized, we're good to go.
+ if (jit->GetJitCompiler()->IsBaselineCompiler() || kind != CompilationKind::kOptimized) {
+ break;
+ }
+ // If we're requesting optimized, check that we did get the method
+ // compiled optimized.
+ OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromEntryPoint(entry_point);
+ if (!CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
+ break;
+ }
+ }
+ } while (true);
}
extern "C" JNIEXPORT void JNICALL Java_Main_ensureMethodJitCompiled(JNIEnv*, jclass, jobject meth) {
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 3e485f6..013272f 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1358,8 +1358,13 @@
"description": "Interpreting BigInteger.add() is too slow (timeouts)"
},
{
- "tests": ["2029-contended-monitors"],
+ "tests": ["2029-contended-monitors", "2043-reference-pauses"],
"variant": "interpreter | interp-ac | gcstress | trace",
+ "description": ["Slow tests. Prone to timeouts."]
+ },
+ {
+ "tests": ["2042-reference-processing"],
+ "variant": "interpreter | interp-ac | gcstress | trace | debuggable",
"description": ["Slow test. Prone to timeouts."]
},
{
@@ -1497,5 +1502,10 @@
"variant": "target & ndebug & 64",
"bug": "b/224733324",
"description": ["segfault in VarHandle::GetMethodTypeMatchForAccessMode"]
+ },
+ {
+ "tests": ["2043-reference-pauses"],
+ "env_vars": {"ART_TEST_DEBUG_GC": "true"},
+ "description": ["Test timing out on debug gc."]
}
]
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 319e1a7..d5e0543 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -132,8 +132,8 @@
HOST_OUT_EXECUTABLES = os.path.join(ANDROID_BUILD_TOP,
_get_build_var("HOST_OUT_EXECUTABLES"))
-# Set up default values for $DX, $SMALI, etc to the $HOST_OUT_EXECUTABLES/$name path.
-for tool in ['dx', 'smali', 'jasmin', 'd8']:
+# Set up default values for $D8, $SMALI, etc to the $HOST_OUT_EXECUTABLES/$name path.
+for tool in ['smali', 'jasmin', 'd8']:
os.environ.setdefault(tool.upper(), HOST_OUT_EXECUTABLES + '/' + tool)
ANDROID_JAVA_TOOLCHAIN = os.path.join(ANDROID_BUILD_TOP,
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 4191771..2e4e91a 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -81,7 +81,7 @@
if 'build' in target:
build_command = target.get('build').format(
ANDROID_BUILD_TOP = env.ANDROID_BUILD_TOP,
- MAKE_OPTIONS='DX= -j{threads}'.format(threads = n_threads))
+ MAKE_OPTIONS='D8= -j{threads}'.format(threads = n_threads))
sys.stdout.write(str(build_command) + '\n')
sys.stdout.flush()
if subprocess.call(build_command.split()):
@@ -90,7 +90,7 @@
# make runs soong/kati to build the target listed in the entry.
if 'make' in target:
build_command = 'build/soong/soong_ui.bash --make-mode'
- build_command += ' DX='
+ build_command += ' D8='
build_command += ' -j' + str(n_threads)
build_command += ' ' + target.get('make')
if env.DIST_DIR:
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 935ce0c..de0cd62 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -1241,7 +1241,7 @@
if 'jvm' in _user_input_variants['target']:
build_targets += 'test-art-host-run-test-dependencies '
build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
- build_command += ' DX='
+ build_command += ' D8='
if dist:
build_command += ' dist'
build_command += ' ' + build_targets
diff --git a/test/utils/regen-test-files b/test/utils/regen-test-files
index 5478a86..7ddffe6 100755
--- a/test/utils/regen-test-files
+++ b/test/utils/regen-test-files
@@ -205,6 +205,13 @@
"832-cha-recursive",
])
+known_failing_on_hwasan_tests = frozenset([
+ "CtsJdwpTestCases", # times out
+ "art_standalone_compiler_tests", # b/230392320
+ "art_standalone_dex2oat_tests", # b/228881278
+ "BootImageProfileTest", # b/232012605
+])
+
# ART gtests that do not need root access to the device.
art_gtest_user_module_names = [
"art_libnativebridge_cts_tests",
@@ -428,16 +435,33 @@
run_test_path = os.path.join(self.art_test_dir, run_test)
bp_file = os.path.join(run_test_path, "Android.bp")
+ # Optional test metadata (JSON file).
+ metadata_file = os.path.join(run_test_path, "test-metadata.json")
+ metadata = {}
+ if os.path.exists(metadata_file):
+ with open(metadata_file, "r") as f:
+ metadata = json.load(f)
+
run_test_module_name = ART_RUN_TEST_MODULE_NAME_PREFIX + run_test
+ # Set the test configuration template.
if self.is_runnable(run_test):
- if self.is_slow(run_test):
- test_config_template= "art-run-test-target-slow-template"
+ if "cts" in metadata.get("test_suites", []):
+ test_config_template = "art-run-test-target-cts-template"
+ elif self.is_slow(run_test):
+ test_config_template = "art-run-test-target-slow-template"
else:
test_config_template = "art-run-test-target-template"
else:
test_config_template = "art-run-test-target-no-test-suite-tag-template"
+ # Define `test_suites`, if present in the test's metadata.
+ test_suites = ""
+ if metadata.get("test_suites"):
+ test_suites = f"""\
+
+ test_suites: {json.dumps(metadata.get("test_suites"))},"""
+
if is_checker_test(run_test):
include_src = """\
@@ -478,7 +502,7 @@
data: [
":{run_test_module_name}-expected-stdout",
":{run_test_module_name}-expected-stderr",
- ],{include_src}
+ ],{test_suites}{include_src}
}}
// Test's expected standard output.
@@ -524,6 +548,8 @@
]
presubmit_tests = other_presubmit_tests + run_test_module_names + art_gtest_module_names
presubmit_tests_dict = [{"name": t} for t in presubmit_tests]
+ hwasan_presubmit_tests_dict = [{"name": t} for t in presubmit_tests
+ if t not in known_failing_on_hwasan_tests]
# Use an `OrderedDict` container to preserve the order in which items are inserted.
# Do not produce an entry for a test group if it is empty.
@@ -533,6 +559,7 @@
in [
("mainline-presubmit", mainline_presubmit_tests_dict),
("presubmit", presubmit_tests_dict),
+ ("hwasan-presubmit", hwasan_presubmit_tests_dict),
]
if test_group_dict
])
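To make the regen-test-files changes above easier to follow in one place, here is a condensed, hypothetical Python sketch of the new logic: an optional per-test test-metadata.json selects the CTS config template and populates test_suites, and the hwasan-presubmit group is simply the presubmit list with the known-failing tests removed. The helper parameters (runnable, slow) stand in for the script's is_runnable()/is_slow() checks; everything else is illustrative rather than the exact script code.

import json
import os

KNOWN_FAILING_ON_HWASAN = frozenset([
    "CtsJdwpTestCases",
    "art_standalone_compiler_tests",
    "art_standalone_dex2oat_tests",
    "BootImageProfileTest",
])

def choose_template(run_test_path, runnable, slow):
    """Pick the TradeFed config template for one run-test, preferring the CTS
    template when the test's optional metadata opts it into the cts suite."""
    metadata = {}
    metadata_file = os.path.join(run_test_path, "test-metadata.json")
    if os.path.exists(metadata_file):
        with open(metadata_file, "r") as f:
            metadata = json.load(f)
    if not runnable:
        return "art-run-test-target-no-test-suite-tag-template", metadata
    if "cts" in metadata.get("test_suites", []):
        return "art-run-test-target-cts-template", metadata
    if slow:
        return "art-run-test-target-slow-template", metadata
    return "art-run-test-target-template", metadata

def hwasan_presubmit_group(presubmit_tests):
    """hwasan-presubmit is the presubmit list minus tests known to fail there."""
    return [{"name": t} for t in presubmit_tests if t not in KNOWN_FAILING_ON_HWASAN]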
diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh
index 8992512..bbe71b6 100755
--- a/tools/build_linux_bionic.sh
+++ b/tools/build_linux_bionic.sh
@@ -16,83 +16,38 @@
# This will build a target using linux_bionic. It can be called with normal make
# flags.
-#
-# TODO This runs a 'm clean' prior to building the targets in order to ensure
-# that obsolete kati files don't mess up the build.
-if [[ -z $ANDROID_BUILD_TOP ]]; then
- pushd .
-else
- pushd $ANDROID_BUILD_TOP
-fi
+set -e
if [ ! -d art ]; then
echo "Script needs to be run at the root of the android tree"
exit 1
fi
+export TARGET_PRODUCT=linux_bionic
+
+# Avoid Soong error about invalid dependencies on disabled libLLVM_android,
+# which we get due to the --soong-only mode. (Another variant is to set
+# SOONG_ALLOW_MISSING_DEPENDENCIES).
+export FORCE_BUILD_LLVM_COMPONENTS=true
+
# TODO(b/194433871): Set MODULE_BUILD_FROM_SOURCE to disable prebuilt modules,
# which Soong otherwise can create duplicate install rules for in --soong-only
# mode.
-soong_args="MODULE_BUILD_FROM_SOURCE=true"
+export MODULE_BUILD_FROM_SOURCE=true
# Switch the build system to unbundled mode in the reduced manifest branch.
if [ ! -d frameworks/base ]; then
- soong_args="$soong_args TARGET_BUILD_UNBUNDLED=true"
+ export TARGET_BUILD_UNBUNDLED=true
fi
-source build/envsetup.sh >&/dev/null # for get_build_var
-# Soong needs a bunch of variables set and will not run if they are missing.
-# The default values of these variables is only contained in make, so use
-# nothing to create the variables then remove all the other artifacts.
-# Lunch since it seems we cannot find the build-number otherwise.
-lunch aosp_x86-eng
-build/soong/soong_ui.bash --make-mode $soong_args nothing
+vars="$(build/soong/soong_ui.bash --dumpvars-mode --vars="OUT_DIR BUILD_NUMBER")"
+# Assign to a variable and eval that, since bash ignores any error status from
+# the command substitution if it's directly on the eval line.
+eval $vars
-if [ $? != 0 ]; then
- exit 1
-fi
+# This file is currently not created in --soong-only mode, but some build
+# targets depend on it.
+printf %s "${BUILD_NUMBER}" > ${OUT_DIR}/soong/build_number.txt
-out_dir=$(get_build_var OUT_DIR)
-host_out=$(get_build_var HOST_OUT)
-
-# TODO(b/31559095) Figure out a better way to do this.
-#
-# There is no good way to force soong to generate host-bionic builds currently
-# so this is a hacky workaround.
-tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX)
-tmp_build_number=$(cat ${out_dir}/soong/build_number.txt)
-
-cat $out_dir/soong/soong.variables > ${tmp_soong_var}
-
-# See comment above about b/123645297 for why we cannot just do m clean. Clear
-# out all files except for intermediates and installed files and dexpreopt.config.
-find $out_dir/ -maxdepth 1 -mindepth 1 \
- -not -name soong \
- -not -name host \
- -not -name target | xargs -I '{}' rm -rf '{}'
-find $out_dir/soong/ -maxdepth 1 -mindepth 1 \
- -not -name .intermediates \
- -not -name host \
- -not -name dexpreopt.config \
- -not -name target | xargs -I '{}' rm -rf '{}'
-
-python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
-import json
-import sys
-x = json.load(open(sys.argv[1]))
-x['Allow_missing_dependencies'] = True
-x['HostArch'] = 'x86_64'
-x['CrossHost'] = 'linux_bionic'
-x['CrossHostArch'] = 'x86_64'
-if 'CrossHostSecondaryArch' in x:
- del x['CrossHostSecondaryArch']
-json.dump(x, open(sys.argv[2], mode='w'))
-END
-
-rm $tmp_soong_var
-
-# Write a new build-number
-echo ${tmp_build_number}_SOONG_ONLY_BUILD > ${out_dir}/soong/build_number.txt
-
-build/soong/soong_ui.bash --make-mode --skip-config --soong-only $soong_args $@
+build/soong/soong_ui.bash --make-mode --soong-only "$@"
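The rewritten build_linux_bionic.sh above swaps the old soong.variables editing for a single --dumpvars-mode query whose output is eval'd. For comparison, a hypothetical Python version of the same lookup might look like the sketch below; it assumes, as the script does, that --dumpvars-mode prints shell-style VAR='value' assignments and that it is run from the root of the tree.

import shlex
import subprocess

def dump_build_vars(*names):
    """Query soong_ui for build variables and return them as a dict.

    Each output line is a shell assignment such as OUT_DIR='out', so split on
    the first '=' and unquote the value with shlex.
    """
    out = subprocess.check_output(
        ["build/soong/soong_ui.bash", "--dumpvars-mode", "--vars=" + " ".join(names)],
        text=True,
    )
    build_vars = {}
    for line in out.splitlines():
        key, sep, value = line.partition("=")
        if sep:
            parts = shlex.split(value)
            build_vars[key] = parts[0] if parts else ""
    return build_vars

# Example (hypothetical): vars = dump_build_vars("OUT_DIR", "BUILD_NUMBER")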
diff --git a/tools/build_linux_bionic_tests.sh b/tools/build_linux_bionic_tests.sh
index 7379e9a..0470d6d 100755
--- a/tools/build_linux_bionic_tests.sh
+++ b/tools/build_linux_bionic_tests.sh
@@ -14,64 +14,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-if [[ -z $ANDROID_BUILD_TOP ]]; then
- pushd .
-else
- pushd $ANDROID_BUILD_TOP
-fi
+set -e
if [ ! -d art ]; then
echo "Script needs to be run at the root of the android tree"
exit 1
fi
-soong_args=""
-
# Switch the build system to unbundled mode in the reduced manifest branch.
if [ ! -d frameworks/base ]; then
- soong_args="$soong_args TARGET_BUILD_UNBUNDLED=true"
+ export TARGET_BUILD_UNBUNDLED=true
fi
-source build/envsetup.sh >&/dev/null # for get_build_var
-
-out_dir=$(get_build_var OUT_DIR)
-host_out=$(get_build_var HOST_OUT)
-
-# TODO(b/31559095) Figure out a better way to do this.
-#
-# There is no good way to force soong to generate host-bionic builds currently
-# so this is a hacky workaround.
+vars="$(build/soong/soong_ui.bash --dumpvars-mode --vars="OUT_DIR HOST_OUT")"
+# Assign to a variable and eval that, since bash ignores any error status from
+# the command substitution if it's directly on the eval line.
+eval $vars
# First build all the targets still in .mk files (also build normal glibc host
# targets so we know what's needed to run the tests).
-build/soong/soong_ui.bash --make-mode $soong_args "$@" test-art-host-run-test-dependencies build-art-host-tests
-if [ $? != 0 ]; then
- exit 1
-fi
+build/soong/soong_ui.bash --make-mode "$@" test-art-host-run-test-dependencies build-art-host-tests
-tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX)
+# Next build the Linux host Bionic targets in --soong-only mode.
+export TARGET_PRODUCT=linux_bionic
-echo "Saving soong.variables to " $tmp_soong_var
-cat $out_dir/soong/soong.variables > ${tmp_soong_var}
-python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
-import json
-import sys
-x = json.load(open(sys.argv[1]))
-x['Allow_missing_dependencies'] = True
-x['HostArch'] = 'x86_64'
-x['CrossHost'] = 'linux_bionic'
-x['CrossHostArch'] = 'x86_64'
-if 'CrossHostSecondaryArch' in x:
- del x['CrossHostSecondaryArch']
-json.dump(x, open(sys.argv[2], mode='w'))
-END
-if [ $? != 0 ]; then
- mv $tmp_soong_var $out_dir/soong/soong.variables
- exit 2
-fi
+# Avoid Soong error about invalid dependencies on disabled libLLVM_android,
+# which we get due to the --soong-only mode. (Another variant is to set
+# SOONG_ALLOW_MISSING_DEPENDENCIES).
+export FORCE_BUILD_LLVM_COMPONENTS=true
-soong_out=$out_dir/soong/host/linux_bionic-x86
+soong_out=$OUT_DIR/soong/host/linux_bionic-x86
declare -a bionic_targets
# These are the binaries actually used in tests. Since some of the files are
# java targets or 32 bit we cannot just do the same find for the bin files.
@@ -89,24 +61,10 @@
$soong_out/bin/hprof-conv
$soong_out/bin/signal_dumper
$soong_out/lib64/libclang_rt.ubsan_standalone-x86_64-android.so
- $(find $host_out/apex/com.android.art.host.zipapex -type f | sed "s:$host_out:$soong_out:g")
- $(find $host_out/lib64 -type f | sed "s:$host_out:$soong_out:g")
- $(find $host_out/nativetest64 -type f | sed "s:$host_out:$soong_out:g"))
+ $(find $HOST_OUT/apex/com.android.art.host.zipapex -type f | sed "s:$HOST_OUT:$soong_out:g")
+ $(find $HOST_OUT/lib64 -type f | sed "s:$HOST_OUT:$soong_out:g")
+ $(find $HOST_OUT/nativetest64 -type f | sed "s:$HOST_OUT:$soong_out:g"))
echo building ${bionic_targets[*]}
-build/soong/soong_ui.bash --make-mode --skip-config --soong-only $soong_args "$@" ${bionic_targets[*]}
-ret=$?
-
-mv $tmp_soong_var $out_dir/soong/soong.variables
-
-# Having built with host-bionic confuses soong somewhat by making it think the
-# linux_bionic targets are needed for art phony targets like
-# test-art-host-run-test-dependencies. To work around this blow away all
-# ninja files in OUT_DIR. The build system is smart enough to not need to
-# rebuild stuff so this should be fine.
-rm -f $OUT_DIR/*.ninja $OUT_DIR/soong/*.ninja
-
-popd
-
-exit $ret
+build/soong/soong_ui.bash --make-mode --soong-only "$@" ${bionic_targets[*]}
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 4af36c5..7692fa7 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -181,7 +181,7 @@
fi
done
- # Replace stub libraries with implemenation libraries: because we do chroot
+ # Replace stub libraries with implementation libraries: because we do chroot
# testing, we need to install an implementation of the libraries (and cannot
# rely on the one already installed on the device, if the device is post R and
# has it).
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
index 2572ea6..459e5a8 100644
--- a/tools/cpp-define-generator/globals.def
+++ b/tools/cpp-define-generator/globals.def
@@ -28,6 +28,7 @@
#include "mirror/object_reference.h"
#include "runtime_globals.h"
#include "stack.h"
+#include "entrypoints/quick/callee_save_frame.h"
#endif
ASM_DEFINE(ACCESS_FLAGS_METHOD_IS_NATIVE,
@@ -82,3 +83,11 @@
std::memory_order_relaxed)
ASM_DEFINE(STACK_OVERFLOW_RESERVED_BYTES,
GetStackOverflowReservedBytes(art::kRuntimeISA))
+ASM_DEFINE(CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS,
+ art::POPCOUNT(art::RuntimeCalleeSaveFrame::GetCoreSpills(
+ art::CalleeSaveType::kSaveEverything)))
+ASM_DEFINE(TAGGED_JNI_SP_MASK, art::ManagedStack::kTaggedJniSpMask)
+ASM_DEFINE(TAGGED_JNI_SP_MASK_TOGGLED32,
+ ~static_cast<uint32_t>(art::ManagedStack::kTaggedJniSpMask))
+ASM_DEFINE(TAGGED_JNI_SP_MASK_TOGGLED64,
+ ~static_cast<uint64_t>(art::ManagedStack::kTaggedJniSpMask))
diff --git a/tools/cpp-define-generator/lockword.def b/tools/cpp-define-generator/lockword.def
index a170c15..5494d59 100644
--- a/tools/cpp-define-generator/lockword.def
+++ b/tools/cpp-define-generator/lockword.def
@@ -30,10 +30,8 @@
art::LockWord::kMarkBitStateMaskShifted)
ASM_DEFINE(LOCK_WORD_MARK_BIT_SHIFT,
art::LockWord::kMarkBitStateShift)
-ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_MASK,
+ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_MASK_SHIFTED,
art::LockWord::kReadBarrierStateMaskShifted)
-ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED,
- art::LockWord::kReadBarrierStateMaskShiftedToggled)
ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_SHIFT,
art::LockWord::kReadBarrierStateShift)
ASM_DEFINE(LOCK_WORD_STATE_FORWARDING_ADDRESS,
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index bae9200..d796542 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -37,6 +37,8 @@
(art::WhichPowerOf2(sizeof(art::InterpreterCache::Entry)) - 2))
ASM_DEFINE(THREAD_IS_GC_MARKING_OFFSET,
art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_DEOPT_CHECK_REQUIRED_OFFSET,
+ art::Thread::DeoptCheckRequiredOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
art::Thread::ThreadLocalAllocStackEndOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
diff --git a/tools/dist_linux_bionic.sh b/tools/dist_linux_bionic.sh
index 4c7ba1c..f710310 100755
--- a/tools/dist_linux_bionic.sh
+++ b/tools/dist_linux_bionic.sh
@@ -19,12 +19,6 @@
# Builds the given targets using linux-bionic and moves the output files to the
# DIST_DIR. Takes normal make arguments.
-if [[ -z $ANDROID_BUILD_TOP ]]; then
- pushd .
-else
- pushd $ANDROID_BUILD_TOP
-fi
-
if [[ -z $DIST_DIR ]]; then
echo "DIST_DIR must be set!"
exit 1
@@ -35,10 +29,12 @@
exit 1
fi
-source build/envsetup.sh >&/dev/null # for get_build_var
-out_dir=$(get_build_var OUT_DIR)
+vars="$(build/soong/soong_ui.bash --dumpvars-mode --vars="OUT_DIR")"
+# Assign to a variable and eval that, since bash ignores any error status from
+# the command substitution if it's directly on the eval line.
+eval $vars
-./art/tools/build_linux_bionic.sh $@
+./art/tools/build_linux_bionic.sh "$@"
mkdir -p $DIST_DIR
-cp -R ${out_dir}/soong/host/* $DIST_DIR/
+cp -R ${OUT_DIR}/soong/host/* $DIST_DIR/
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 6e6ccb8..0b40e19 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -330,5 +330,18 @@
bug: 228441328,
names: ["tck.java.time",
"test.java.time"]
+},
+{
+ description: "vogar does not respect '@BeforeMethod' and '@AfterMethod' annotations yet",
+ result: ERROR,
+ bug: 231283845,
+ names: ["test.java.lang.invoke.MethodTypeTest#testGeneric",
+ "test.java.lang.invoke.MethodTypeTest#testMakeGeneric"]
+},
+{
+ description: "Timing out after ojluni tests were enabled",
+ result: ERROR,
+ bug: 231439593,
+ names: ["org.apache.harmony.tests.java.math.BigIntegerConstructorsTest#testConstructorPrime"]
}
]
diff --git a/tools/libcore_fugu_failures.txt b/tools/libcore_fugu_failures.txt
index 0fff814..11067f5 100644
--- a/tools/libcore_fugu_failures.txt
+++ b/tools/libcore_fugu_failures.txt
@@ -112,7 +112,6 @@
"org.apache.harmony.crypto.tests.javax.crypto.func.CipherRSATest#test_RSANoPadding",
"org.apache.harmony.crypto.tests.javax.crypto.func.CipherRSATest#test_RSAShortKey",
"org.apache.harmony.crypto.tests.javax.crypto.func.KeyGeneratorFunctionalTest#test_",
- "org.apache.harmony.tests.java.math.BigIntegerConstructorsTest#testConstructorPrime",
"org.apache.harmony.tests.java.math.BigIntegerTest#test_isProbablePrimeI",
"org.apache.harmony.tests.java.math.OldBigIntegerTest#test_ConstructorIILjava_util_Random",
"org.apache.harmony.tests.java.math.OldBigIntegerTest#test_isProbablePrimeI",
@@ -125,7 +124,102 @@
"org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testGenerateSeedint02",
"org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testGenerateSeedint03",
"org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testNextBytesbyteArray03",
- "org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testSetSeedbyteArray02"
+ "org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testSetSeedbyteArray02",
+ "test.java.awt",
+ "test.java.io.ByteArrayInputStream",
+ "test.java.io.ByteArrayOutputStream",
+ "test.java.io.FileReader",
+ "test.java.io.FileWriter",
+ "test.java.io.InputStream",
+ "test.java.io.OutputStream",
+ "test.java.io.PrintStream",
+ "test.java.io.PrintWriter",
+ "test.java.io.Reader",
+ "test.java.io.Writer",
+ "test.java.lang.Boolean",
+ "test.java.lang.ClassLoader",
+ "test.java.lang.Double",
+ "test.java.lang.Float",
+ "test.java.lang.Integer",
+ "test.java.lang.Long",
+ "test.java.lang.StrictMath.CubeRootTests",
+ "test.java.lang.StrictMath.ExactArithTests",
+ "test.java.lang.StrictMath.Expm1Tests",
+ "test.java.lang.StrictMath.ExpTests",
+ "test.java.lang.StrictMath.HyperbolicTests",
+ "test.java.lang.StrictMath.HypotTests#testAgainstTranslit_shard1",
+ "test.java.lang.StrictMath.HypotTests#testAgainstTranslit_shard2",
+ "test.java.lang.StrictMath.HypotTests#testAgainstTranslit_shard3",
+ "test.java.lang.StrictMath.HypotTests#testAgainstTranslit_shard4",
+ "test.java.lang.StrictMath.HypotTests#testHypot",
+ "test.java.lang.StrictMath.Log1pTests",
+ "test.java.lang.StrictMath.Log10Tests",
+ "test.java.lang.StrictMath.MultiplicationTests",
+ "test.java.lang.StrictMath.PowTests",
+ "test.java.lang.String",
+ "test.java.lang.Thread",
+ "test.java.lang.invoke",
+ "test.java.lang.ref.SoftReference",
+ "test.java.lang.ref.BasicTest",
+ "test.java.lang.ref.EnqueueNullRefTest",
+ "test.java.lang.ref.EnqueuePollRaceTest",
+ "test.java.lang.ref.ReferenceCloneTest",
+ "test.java.lang.ref.ReferenceEnqueuePendingTest",
+ "test.java.math.BigDecimal",
+ "test.java.math.BigInteger#testArithmetic",
+ "test.java.math.BigInteger#testBitCount",
+ "test.java.math.BigInteger#testBitLength",
+ "test.java.math.BigInteger#testbitOps",
+ "test.java.math.BigInteger#testBitwise",
+ "test.java.math.BigInteger#testByteArrayConv",
+ "test.java.math.BigInteger#testConstructor",
+ "test.java.math.BigInteger#testDivideAndReminder",
+ "test.java.math.BigInteger#testDivideLarge",
+ "test.java.math.BigInteger#testModExp",
+ "test.java.math.BigInteger#testMultiplyLarge",
+ "test.java.math.BigInteger#testNextProbablePrime",
+ "test.java.math.BigInteger#testPow",
+ "test.java.math.BigInteger#testSerialize",
+ "test.java.math.BigInteger#testShift",
+ "test.java.math.BigInteger#testSquare",
+ "test.java.math.BigInteger#testSquareLarge",
+ "test.java.math.BigInteger#testSquareRootAndReminder",
+ "test.java.math.BigInteger#testStringConv_generic",
+ "test.java.math.RoundingMode",
+ "test.java.net.DatagramSocket",
+ "test.java.net.Socket",
+ "test.java.net.SocketOptions",
+ "test.java.net.URLDecoder",
+ "test.java.net.URLEncoder",
+ "test.java.nio.channels.Channels",
+ "test.java.nio.channels.SelectionKey",
+ "test.java.nio.channels.Selector",
+ "test.java.nio.file",
+ "test.java.security.cert",
+ "test.java.security.KeyAgreement.KeyAgreementTest",
+ "test.java.security.KeyAgreement.KeySizeTest#testECDHKeySize",
+ "test.java.security.KeyAgreement.KeySpecTest",
+ "test.java.security.KeyAgreement.MultiThreadTest",
+ "test.java.security.KeyAgreement.NegativeTest",
+ "test.java.security.KeyStore",
+ "test.java.security.Provider",
+ "test.java.util.Arrays",
+ "test.java.util.Collection",
+ "test.java.util.Collections",
+ "test.java.util.Date",
+ "test.java.util.EnumMap",
+ "test.java.util.EnumSet",
+ "test.java.util.GregorianCalendar",
+ "test.java.util.LinkedHashMap",
+ "test.java.util.LinkedHashSet",
+ "test.java.util.List",
+ "test.java.util.Map",
+ "test.java.util.Optional",
+ "test.java.util.TimeZone",
+ "test.java.util.concurrent",
+ "test.java.util.function",
+ "test.java.util.stream",
+ "test.java.util.zip.ZipFile"
]
}
]
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 2193189..ccd8db5 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -50,9 +50,14 @@
"org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest#testConsequentProxyConnection",
"org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_removeJ",
"org.apache.harmony.tests.java.lang.ProcessManagerTest#testSleep",
- "org.apache.harmony.tests.java.math.BigIntegerConstructorsTest#testConstructorPrime",
"org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
- "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext"
+ "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext",
+ "test.java.lang.StrictMath.HypotTests_testAgainstTranslit_shard1",
+ "test.java.lang.StrictMath.HypotTests_testAgainstTranslit_shard2",
+ "test.java.lang.StrictMath.HypotTests_testAgainstTranslit_shard3",
+ "test.java.lang.StrictMath.HypotTests_testAgainstTranslit_shard4",
+ "test.java.math.BigDecimal",
+ "test.java.math.BigInteger_testConstructor"
]
},
{
diff --git a/tools/libcore_gcstress_failures.txt b/tools/libcore_gcstress_failures.txt
index 55bba72..81d6ca0 100644
--- a/tools/libcore_gcstress_failures.txt
+++ b/tools/libcore_gcstress_failures.txt
@@ -36,7 +36,6 @@
"libcore.java.util.stream.CollectorsTest#counting_largeStream",
"org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_remove",
"org.apache.harmony.tests.java.lang.String2Test#test_getBytes",
- "org.apache.harmony.tests.java.math.BigIntegerConstructorsTest#testConstructorPrime",
"org.apache.harmony.tests.java.text.DateFormatTest#test_getAvailableLocales",
"org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
"org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext"]
diff --git a/tools/luci/config/generated/cr-buildbucket.cfg b/tools/luci/config/generated/cr-buildbucket.cfg
index e4a5923..887b2af 100644
--- a/tools/luci/config/generated/cr-buildbucket.cfg
+++ b/tools/luci/config/generated/cr-buildbucket.cfg
@@ -35,7 +35,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -59,7 +59,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -83,7 +83,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -107,7 +107,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -131,7 +131,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -155,7 +155,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -179,7 +179,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -203,7 +203,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -227,7 +227,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -251,7 +251,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -275,7 +275,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -299,7 +299,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -323,7 +323,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -347,7 +347,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -371,7 +371,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -395,7 +395,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -419,7 +419,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -443,7 +443,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -467,7 +467,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -491,7 +491,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -515,7 +515,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -539,7 +539,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -563,7 +563,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -587,7 +587,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
builders {
@@ -611,7 +611,7 @@
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
experiments {
key: "luci.recipes.use_python3"
- value: 10
+ value: 100
}
}
}
diff --git a/tools/luci/config/main.star b/tools/luci/config/main.star
index f4ad488..d6dc9d5 100755
--- a/tools/luci/config/main.star
+++ b/tools/luci/config/main.star
@@ -23,7 +23,7 @@
lucicfg.check_version("1.30.9", "Please update depot_tools")
luci.builder.defaults.experiments.set({
- "luci.recipes.use_python3": 10,
+ "luci.recipes.use_python3": 100,
})
# Use LUCI Scheduler BBv2 names and add Scheduler realms configs.
diff --git a/tools/run-libcore-tests.py b/tools/run-libcore-tests.py
index 1e6070a..8da79c1 100755
--- a/tools/run-libcore-tests.py
+++ b/tools/run-libcore-tests.py
@@ -159,21 +159,15 @@
"test.java.math.BigInteger#testDivideAndReminder",
"test.java.math.BigInteger#testDivideLarge",
"test.java.math.BigInteger#testModExp",
- "test.java.math.BigInteger#testModInv",
"test.java.math.BigInteger#testMultiplyLarge",
"test.java.math.BigInteger#testNextProbablePrime",
"test.java.math.BigInteger#testPow",
- "test.java.math.BigInteger#testPrime",
"test.java.math.BigInteger#testSerialize",
"test.java.math.BigInteger#testShift",
"test.java.math.BigInteger#testSquare",
"test.java.math.BigInteger#testSquareLarge",
- "test.java.math.BigInteger#testSquareRoot",
"test.java.math.BigInteger#testSquareRootAndReminder",
"test.java.math.BigInteger#testStringConv_generic",
- "test.java.math.BigInteger#testStringConv_schoenhage_threshold_pow0",
- "test.java.math.BigInteger#testStringConv_schoenhage_threshold_pow1",
- "test.java.math.BigInteger#testStringConv_schoenhage_threshold_pow2",
"test.java.math.RoundingMode",
# test.java.net
"test.java.net.DatagramSocket",
@@ -190,7 +184,6 @@
"test.java.security.cert",
# Sharded test.java.security.KeyAgreement
"test.java.security.KeyAgreement.KeyAgreementTest",
- "test.java.security.KeyAgreement.KeySizeTest#testDHKeySize",
"test.java.security.KeyAgreement.KeySizeTest#testECDHKeySize",
"test.java.security.KeyAgreement.KeySpecTest",
"test.java.security.KeyAgreement.MultiThreadTest",
@@ -242,6 +235,20 @@
CLASSPATH = ["core-tests", "core-ojtests", "jsr166-tests", "mockito-target"]
+SLOW_OJLUNI_TESTS = {
+ "test.java.awt",
+ "test.java.lang.String",
+ "test.java.lang.invoke",
+ "test.java.nio.channels.Selector",
+ "test.java.time",
+ "test.java.util.Arrays",
+ "test.java.util.Map",
+ "test.java.util.concurrent",
+ "test.java.util.stream",
+ "test.java.util.zip.ZipFile",
+ "tck.java.time",
+}
+
def get_jar_filename(classpath):
base_path = (ANDROID_PRODUCT_OUT + "/../..") if ANDROID_PRODUCT_OUT else "out/target"
base_path = os.path.normpath(base_path) # Normalize ".." components for readability.
@@ -275,6 +282,7 @@
# See b/78228743 and b/178351808.
if args.gcstress or args.debug or args.mode == "jvm":
test_names = list(t for t in test_names if not t.startswith("libcore.highmemorytest"))
+ test_names = list(filter(lambda x: x not in SLOW_OJLUNI_TESTS, test_names))
return test_names
def get_vogar_command(test_name):
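A short sketch of how the new SLOW_OJLUNI_TESTS set above takes effect: the slow ojluni suites are dropped from the expanded test list only in configurations that are already slow (gcstress, debug) or that run on the RI (jvm), alongside the existing libcore.highmemorytest filter. The function signature below is a simplification of the script's real argument handling.

SLOW_OJLUNI_TESTS = {
    "test.java.awt",
    "test.java.lang.String",
    "test.java.lang.invoke",
    "test.java.nio.channels.Selector",
    "test.java.time",
    "test.java.util.Arrays",
    "test.java.util.Map",
    "test.java.util.concurrent",
    "test.java.util.stream",
    "test.java.util.zip.ZipFile",
    "tck.java.time",
}

def filter_test_names(test_names, gcstress=False, debug=False, mode="device"):
    """Drop high-memory and slow ojluni suites when the run is already slow."""
    if gcstress or debug or mode == "jvm":
        test_names = [t for t in test_names if not t.startswith("libcore.highmemorytest")]
        test_names = [t for t in test_names if t not in SLOW_OJLUNI_TESTS]
    return test_names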
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index 618742d..734dce0 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -60,7 +60,6 @@
override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
- art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
diff --git a/tools/veridex/Android.mk b/tools/veridex/Android.mk
index 614952f..9ea9b3a 100644
--- a/tools/veridex/Android.mk
+++ b/tools/veridex/Android.mk
@@ -23,14 +23,14 @@
system_stub_dex := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/core_dex_intermediates/classes.dex
$(system_stub_dex): PRIVATE_MIN_SDK_VERSION := 1000
-$(system_stub_dex): $(call resolve-prebuilt-sdk-jar-path,system_current) | $(ZIP2ZIP) $(DX)
+$(system_stub_dex): $(call resolve-prebuilt-sdk-jar-path,system_current) | $(ZIP2ZIP) $(D8)
$(transform-classes.jar-to-dex)
$(call declare-1p-target,$(system_stub_dex),art)
oahl_stub_dex := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/oahl_dex_intermediates/classes.dex
$(oahl_stub_dex): PRIVATE_MIN_SDK_VERSION := 1000
-$(oahl_stub_dex): $(call get-prebuilt-sdk-dir,current)/org.apache.http.legacy.jar | $(ZIP2ZIP) $(DX)
+$(oahl_stub_dex): $(call get-prebuilt-sdk-dir,current)/org.apache.http.legacy.jar | $(ZIP2ZIP) $(D8)
$(transform-classes.jar-to-dex)
$(call declare-1p-target,$(oahl_stub_dex),art)
@@ -72,9 +72,6 @@
$(call declare-container-license-deps,$(VERIDEX_FILES_PATH),$(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) \
$(HOST_OUT_EXECUTABLES)/veridex $(system_stub_dex) $(oahl_stub_dex),$(VERIDEX_FILES_PATH):)
-# Make the zip file available for prebuilts.
-$(call dist-for-goals,sdk,$(VERIDEX_FILES_PATH))
-
VERIDEX_FILES :=
system_stub_dex :=
oahl_stub_dex :=