author    | 2018-01-04 18:42:57 +0000
committer | 2018-01-10 14:30:26 +0000
commit    | dc682aa9d0eae1a851af059434adb6f6cf8f06f8 (patch)
tree      | f93f00493ee5887b05b42a6a5dd99eb6794daad4 /runtime/base/bit_string_test.cc
parent    | d6b7e8c63f8eca25460f56f66dcae15eaa897ff0 (diff)
Use 28 bits for type check bit string.
And reverse the order of fields in Class::status_. This
avoids an increase in generated code size:
- ClassStatus in the high bits allows the class initialization
check to use "status_high_byte < (kInitialized << 4)",
which is unaffected by the low 4 bits of the LHS, instead of
needing to extract the status bits;
- the type check bit string in the bottom bits instead of
somewhere in the middle allows the comparison on ARM
to be done with the same code size as with the old
layout in most cases (except when the compared value is
9-16 bits and not a modified immediate: 2 bytes less for
9-12 bits and sometimes 2 bytes more for 13-16 bits; the
latter could be worked around using LDRH if the second
character's boundary is at 16 bits). A sketch of the
layout follows below.
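For illustration, a minimal sketch of the layout described above. The
constants and the kInitialized value here are assumptions for the sketch,
not the actual ART declarations:

  // Sketch only: names and the kInitialized value are hypothetical.
  // Layout modelled:
  //   status_ = [ ClassStatus : 4 bits (high) | type check bit string : 28 bits (low) ]
  #include <cstdint>

  constexpr uint32_t kStatusShift = 28;                          // ClassStatus in the top 4 bits.
  constexpr uint32_t kBitStringMask = (1u << kStatusShift) - 1u; // Bit string in the low 28 bits.
  constexpr uint32_t kInitialized = 11;                          // Hypothetical ClassStatus value.

  // Initialization check that reads only the most significant byte of status_.
  // "status_high_byte < (kInitialized << 4)" ignores the low 4 bits of that
  // byte (they belong to the bit string), so no extraction of the status bits
  // is needed.
  bool NeedsInitialization(uint32_t status_word) {
    uint8_t status_high_byte = static_cast<uint8_t>(status_word >> 24);
    return status_high_byte < (kInitialized << 4);
  }

  // With the bit string in the bottom bits, the type check comparison is a
  // mask-and-compare (or a sub-word load) with no shift.
  bool BitStringEquals(uint32_t status_word, uint32_t expected) {
    return (status_word & kBitStringMask) == expected;
  }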
Add one of the extra bits to the 2nd character to push its
boundary to 16 bits so that we can test an implementation
using 16-bit loads in a subsequent CL; arbitrarily add the
other three bits to the 3rd character. This CL is only
about making those bits available and allowing testing; the
determination of how to use the additional bits for the best
impact (whether to have a 4th character or distribute them
differently among the three characters) shall be done later.
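For reference, the character widths change from {12, 3, 8} to {12, 4, 11}.
A small compile-time sketch of the resulting bit budget (how the remaining
bit of the 28 is used is not spelled out here, so treat that as an open
detail):

  // Bit-budget sketch; widths taken from the test changes below.
  #include <cstddef>

  constexpr size_t kOldBitSizeAtPosition[] = {12, 3, 8};   // 23 bits previously.
  constexpr size_t kNewBitSizeAtPosition[] = {12, 4, 11};  // 27 bits after this change.

  static_assert(kNewBitSizeAtPosition[0] + kNewBitSizeAtPosition[1] == 16,
                "2nd character boundary lands on 16 bits, enabling a 16-bit load (e.g. LDRH)");
  static_assert(kNewBitSizeAtPosition[0] + kNewBitSizeAtPosition[1] + kNewBitSizeAtPosition[2] == 27,
                "27 character bits within the 28 bits reserved for the type check bit string");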
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: testrunner.py --target --optimizing
Bug: 64692057
Change-Id: I38c59837e3df3accb813fb1e04dc42e9afcd2d73
Diffstat (limited to 'runtime/base/bit_string_test.cc')
-rw-r--r-- | runtime/base/bit_string_test.cc | 26
1 file changed, 12 insertions, 14 deletions
diff --git a/runtime/base/bit_string_test.cc b/runtime/base/bit_string_test.cc
index 96aa154ef3..23274e3f2f 100644
--- a/runtime/base/bit_string_test.cc
+++ b/runtime/base/bit_string_test.cc
@@ -65,7 +65,7 @@ size_t AsUint(const T& value) {
   return uint_value;
 }
 
-// Make max bitstring, e.g. BitString[4095,7,255] for {12,3,8}
+// Make max bitstring, e.g. BitString[4095,15,2047] for {12,4,11}
 template <size_t kCount = BitString::kCapacity>
 BitString MakeBitStringMax() {
   BitString bs{};
@@ -87,15 +87,14 @@ BitString SetBitStringCharAt(BitString bit_string, size_t i, size_t val) {
 #define EXPECT_BITSTRING_STR(expected_str, actual_value) \
   EXPECT_STREQ((expected_str), Stringify((actual_value)).c_str())
 
+// TODO: Consider removing this test, it's kind of replicating the logic in GetLsbForPosition().
 TEST(InstanceOfBitString, GetLsbForPosition) {
   ASSERT_LE(3u, BitString::kCapacity);
   // Test will fail if kCapacity is not at least 3. Update it.
-  EXPECT_EQ(0u, BitString::GetLsbForPosition(BitString::kCapacity - 1u));
-  EXPECT_EQ(BitString::kBitSizeAtPosition[BitString::kCapacity - 1u],
-            BitString::GetLsbForPosition(BitString::kCapacity - 2u));
-  EXPECT_EQ(BitString::kBitSizeAtPosition[BitString::kCapacity - 1u] +
-                BitString::kBitSizeAtPosition[BitString::kCapacity - 2u],
-            BitString::GetLsbForPosition(BitString::kCapacity - 3u));
+  EXPECT_EQ(0u, BitString::GetLsbForPosition(0u));
+  EXPECT_EQ(BitString::kBitSizeAtPosition[0u], BitString::GetLsbForPosition(1u));
+  EXPECT_EQ(BitString::kBitSizeAtPosition[0u] + BitString::kBitSizeAtPosition[1u],
+            BitString::GetLsbForPosition(2u));
 }
 
 TEST(InstanceOfBitString, ToString) {
@@ -126,8 +125,8 @@ TEST(InstanceOfBitString, ReadWrite) {
   // Each maximal value should be tested here for each position.
   uint32_t max_bitstring_ints[] = {
     MaxInt<uint32_t>(12),
-    MaxInt<uint32_t>(3),
-    MaxInt<uint32_t>(8),
+    MaxInt<uint32_t>(4),
+    MaxInt<uint32_t>(11),
   };
 
   // Update tests if changing the tuning values above.
@@ -151,14 +150,13 @@ constexpr auto MaxForPos() {
 }
 
 TEST(InstanceOfBitString, MemoryRepresentation) {
-  // Verify that the lower positions are stored in more significant bits.
+  // Verify that the lower positions are stored in less significant bits.
   BitString bs = MakeBitString({MaxForPos<0>(), MaxForPos<1>()});
   BitString::StorageType as_int = static_cast<BitString::StorageType>(bs);
 
-  // Below tests assumes the capacity is 3. Update if it this changes.
-  ASSERT_EQ(3u, BitString::kCapacity);
-  EXPECT_EQ(MaxForPos<0>() << (BitString::kBitSizeAtPosition[2] + BitString::kBitSizeAtPosition[1]) |
-                (MaxForPos<1>() << BitString::kBitSizeAtPosition[2]),
+  // Below tests assumes the capacity is at least 3.
+  ASSERT_LE(3u, BitString::kCapacity);
+  EXPECT_EQ((MaxForPos<0>() << 0) | (MaxForPos<1>() << BitString::kBitSizeAtPosition[0]),
             as_int);
 }
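As a reading aid for the updated MemoryRepresentation expectation, here is a
standalone sketch showing how position 0 occupies the least significant bits
and position 1 sits directly above it. MaxForPos and the width table are
re-created here to keep the sketch self-contained; they mirror, but are not,
the actual test helpers:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  // New tuning values from the test above; position 0 is the first character.
  constexpr uint32_t kBitSizeAtPosition[] = {12, 4, 11};

  constexpr uint32_t MaxForPos(size_t position) {
    return (1u << kBitSizeAtPosition[position]) - 1u;  // All-ones value for that character.
  }

  int main() {
    // Position 0 needs no shift; position 1 starts right after position 0's 12 bits.
    uint32_t as_int = (MaxForPos(0) << 0) | (MaxForPos(1) << kBitSizeAtPosition[0]);
    std::printf("expected storage = 0x%x\n", as_int);  // Prints 0xffff: the low 16 bits set.
    return 0;
  }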