author Ian Rogers <irogers@google.com> 2013-02-05 18:29:08 -0800
committer Ian Rogers <irogers@google.com> 2013-02-05 18:29:08 -0800
commit    1ffa32f0be7becec4907b26ead353e4b17e1219c (patch)
tree      4b023e55080f75a4945a44588d12bfbb3aec8bd3
parent    c0fa3ad44a84b7f658d16a717027bf95abc85db6 (diff)
Reduce inlining in debug builds.
Fixes 018-stack-overflow on the host with interpreter.

Change-Id: Ieed091b341b7812cfe898421a74d2f41f6a6a8bc
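
The reason a macro is needed at all, rather than just the new -fno-inline flag, is that __attribute__ ((always_inline)) forces inlining regardless of -fno-inline, so every forced-inline annotation also has to collapse to nothing in debug builds. A minimal standalone sketch of the pattern follows (not ART code; AddOne is a hypothetical helper, and the #ifdef mirrors the macros.h hunk below):

// Build release-style with -DNDEBUG, or debug-style with -fno-inline and no NDEBUG.
#include <cstdio>

#ifdef NDEBUG
// Release: force inlining even when the optimizer would not inline on its own.
#define ALWAYS_INLINE __attribute__ ((always_inline))
#else
// Debug: expand to nothing so -fno-inline actually takes effect.
#define ALWAYS_INLINE
#endif

// Attributes after the parameter list are only valid on declarations,
// so declare with the attribute first, then define.
inline int AddOne(int x) ALWAYS_INLINE;
inline int AddOne(int x) { return x + 1; }

int main() {
  std::printf("%d\n", AddOne(41));
  return 0;
}

The headers changed below use the same shape: the attribute is attached to declarations such as SharedLock() and the ScopedThreadStateChange constructors and destructors, so release builds keep the forced inlining while debug builds fall back to ordinary calls.
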
-rw-r--r--  build/Android.common.mk            1
-rw-r--r--  src/base/macros.h                  6
-rw-r--r--  src/base/mutex.h                   4
-rw-r--r--  src/mirror/class.h                 2
-rw-r--r--  src/scoped_thread_state_change.h  13
-rw-r--r--  src/thread.h                       4
6 files changed, 19 insertions, 11 deletions
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 21e829ce6e..f613399d58 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -77,6 +77,7 @@ ifeq ($(HOST_OS),linux)
endif
art_debug_cflags := \
+ -fno-inline \
-DDYNAMIC_ANNOTATIONS_ENABLED=1 \
-UNDEBUG
diff --git a/src/base/macros.h b/src/base/macros.h
index 52013dacc2..48cb9c013a 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -130,6 +130,12 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define LIKELY(x) __builtin_expect((x), true)
#define UNLIKELY(x) __builtin_expect((x), false)
+#ifdef NDEBUG
+#define ALWAYS_INLINE __attribute__ ((always_inline))
+#else
+#define ALWAYS_INLINE
+#endif
+
// bionic and glibc both have TEMP_FAILURE_RETRY, but Mac OS' libc doesn't.
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(exp) ({ \
diff --git a/src/base/mutex.h b/src/base/mutex.h
index b530b752dc..b4e05365e9 100644
--- a/src/base/mutex.h
+++ b/src/base/mutex.h
@@ -223,14 +223,14 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
#endif
// Block until ReaderWriterMutex is shared or free then acquire a share on the access.
- void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() __attribute__ ((always_inline));
+ void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }
// Try to acquire share of ReaderWriterMutex.
bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
// Release a share of the access.
- void SharedUnlock(Thread* self) UNLOCK_FUNCTION() __attribute__ ((always_inline));
+ void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }
// Is the current thread the exclusive holder of the ReaderWriterMutex.
diff --git a/src/mirror/class.h b/src/mirror/class.h
index 9e440b46a7..afab3141fe 100644
--- a/src/mirror/class.h
+++ b/src/mirror/class.h
@@ -542,7 +542,7 @@ class MANAGED Class : public StaticStorageBase {
// super class or interface, return the specific implementation
// method for this class.
AbstractMethod* FindVirtualMethodForInterface(AbstractMethod* method) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) __attribute__ ((always_inline, hot));
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
AbstractMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/src/scoped_thread_state_change.h b/src/scoped_thread_state_change.h
index 709109ac85..81db2ecf60 100644
--- a/src/scoped_thread_state_change.h
+++ b/src/scoped_thread_state_change.h
@@ -30,7 +30,7 @@ namespace art {
class ScopedThreadStateChange {
public:
ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) __attribute__ ((always_inline))
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
if (UNLIKELY(self_ == NULL)) {
// Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
@@ -61,7 +61,7 @@ class ScopedThreadStateChange {
}
}
- ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) __attribute__ ((always_inline)) {
+ ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
if (UNLIKELY(self_ == NULL)) {
if (!expected_has_no_thread_) {
MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
@@ -120,7 +120,7 @@ class ScopedThreadStateChange {
class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
public:
explicit ScopedObjectAccessUnchecked(JNIEnv* env)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) __attribute__ ((always_inline))
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: ScopedThreadStateChange(ThreadForEnv(env), kRunnable),
env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
self_->VerifyStack();
@@ -139,7 +139,8 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
explicit ScopedObjectAccessUnchecked(JavaVM* vm)
: ScopedThreadStateChange(), env_(NULL), vm_(reinterpret_cast<JavaVMExt*>(vm)) {}
- ~ScopedObjectAccessUnchecked() __attribute__ ((always_inline)) {
+ // Here purely to force inlining.
+ ~ScopedObjectAccessUnchecked() ALWAYS_INLINE {
}
JNIEnvExt* Env() const {
@@ -275,7 +276,7 @@ class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
public:
explicit ScopedObjectAccess(JNIEnv* env)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((always_inline))
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
: ScopedObjectAccessUnchecked(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
}
@@ -287,7 +288,7 @@ class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
Locks::mutator_lock_->AssertSharedHeld(Self());
}
- ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((always_inline)) {
+ ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
// Base class will release share of lock. Invoked after this destructor.
}
diff --git a/src/thread.h b/src/thread.h
index 58de45de65..7f8bd7e3b6 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -170,14 +170,14 @@ class PACKED(4) Thread {
ThreadState TransitionFromSuspendedToRunnable()
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- __attribute__ ((always_inline));
+ ALWAYS_INLINE;
// Transition from runnable into a state where mutator privileges are denied. Releases share of
// mutator lock.
void TransitionFromRunnableToSuspended(ThreadState new_state)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
UNLOCK_FUNCTION(Locks::mutator_lock_)
- __attribute__ ((always_inline));
+ ALWAYS_INLINE;
// Wait for a debugger suspension on the thread associated with the given peer. Returns the
// thread on success, else NULL. If the thread should be suspended then request_suspension should