Remove the old ARM assemblers from ART.
Now that the old ARM code generator for ART's Optimizing
compiler is gone, these assemblers no longer have any
users, so retire them.
Test: test.py
Bug: 63316036
Change-Id: Iaea42432a9e0d3288b71615f85c58846c0336944
diff --git a/compiler/Android.bp b/compiler/Android.bp
index e61c95b..75086f7 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -113,9 +113,8 @@
"optimizing/intrinsics_arm_vixl.cc",
"optimizing/nodes_shared.cc",
"optimizing/scheduler_arm.cc",
- "utils/arm/assembler_arm.cc",
"utils/arm/assembler_arm_vixl.cc",
- "utils/arm/assembler_thumb2.cc",
+ "utils/arm/constants_arm.cc",
"utils/arm/jni_macro_assembler_arm_vixl.cc",
"utils/arm/managed_register_arm.cc",
],
@@ -450,7 +449,6 @@
codegen: {
arm: {
srcs: [
- "utils/arm/assembler_thumb2_test.cc",
"utils/assembler_thumb_test.cc",
],
},
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 3fc7c50..fe22595 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -147,8 +147,8 @@
Primitive::Type type = instruction->GetType();
// TODO: Implement reading (length + compression) for String compression feature from
- // negative offset (count_offset - data_offset). Thumb2Assembler does not support T4
- // encoding of "LDR (immediate)" at the moment.
+ // negative offset (count_offset - data_offset). Thumb2Assembler (now removed) did
+ // not support T4 encoding of "LDR (immediate)", but ArmVIXLMacroAssembler might.
// Don't move array pointer if it is charAt because we need to take the count first.
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
return;
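
An aside on the TODO above: with the VIXL-backed assembler that remains, a load
from a small negative offset (count_offset - data_offset) would look roughly
like the sketch below. The header path, register choices, offset value, and the
function name are illustrative assumptions, not code from this change.

// Illustrative sketch only: loading the String count word from a negative
// offset via the VIXL AArch32 macro assembler. r0/r1 and -8 are placeholders;
// real code would use the compiler's register allocation and real offsets.
#include "aarch32/macro-assembler-aarch32.h"

using vixl::aarch32::MacroAssembler;
using vixl::aarch32::MemOperand;
using vixl::aarch32::r0;
using vixl::aarch32::r1;

void LoadCountBeforeData(MacroAssembler* masm) {
  // r0 = *(r1 - 8); the macro assembler is expected to pick a suitable
  // LDR encoding for the negative immediate offset.
  masm->Ldr(r0, MemOperand(r1, -8));
}
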
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
deleted file mode 100644
index d5cd59d..0000000
--- a/compiler/utils/arm/assembler_arm.cc
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_arm.h"
-
-#include <algorithm>
-
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "offsets.h"
-#include "thread.h"
-
-namespace art {
-namespace arm {
-
-const char* kRegisterNames[] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
- "fp", "ip", "sp", "lr", "pc"
-};
-
-const char* kConditionNames[] = {
- "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
- "LE", "AL",
-};
-
-std::ostream& operator<<(std::ostream& os, const Register& rhs) {
- if (rhs >= R0 && rhs <= PC) {
- os << kRegisterNames[rhs];
- } else {
- os << "Register[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-
-std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
- if (rhs >= S0 && rhs < kNumberOfSRegisters) {
- os << "s" << static_cast<int>(rhs);
- } else {
- os << "SRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-
-std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
- if (rhs >= D0 && rhs < kNumberOfDRegisters) {
- os << "d" << static_cast<int>(rhs);
- } else {
- os << "DRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
- if (rhs >= EQ && rhs <= AL) {
- os << kConditionNames[rhs];
- } else {
- os << "Condition[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-ShifterOperand::ShifterOperand(uint32_t immed)
- : type_(kImmediate), rm_(kNoRegister), rs_(kNoRegister),
- is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(immed) {
- CHECK(immed < (1u << 12) || ArmAssembler::ModifiedImmediate(immed) != kInvalidModifiedImmediate);
-}
-
-
-uint32_t ShifterOperand::encodingArm() const {
- CHECK(is_valid());
- switch (type_) {
- case kImmediate:
- if (is_rotate_) {
- return (rotate_ << kRotateShift) | (immed_ << kImmed8Shift);
- } else {
- return immed_;
- }
- case kRegister:
- if (is_shift_) {
- uint32_t shift_type;
- switch (shift_) {
- case arm::Shift::ROR:
- shift_type = static_cast<uint32_t>(shift_);
- CHECK_NE(immed_, 0U);
- break;
- case arm::Shift::RRX:
- shift_type = static_cast<uint32_t>(arm::Shift::ROR); // Same encoding as ROR.
- CHECK_EQ(immed_, 0U);
- break;
- default:
- shift_type = static_cast<uint32_t>(shift_);
- }
- // Shifted immediate or register.
- if (rs_ == kNoRegister) {
- // Immediate shift.
- return immed_ << kShiftImmShift |
- shift_type << kShiftShift |
- static_cast<uint32_t>(rm_);
- } else {
- // Register shift.
- return static_cast<uint32_t>(rs_) << kShiftRegisterShift |
- shift_type << kShiftShift | (1 << 4) |
- static_cast<uint32_t>(rm_);
- }
- } else {
- // Simple register
- return static_cast<uint32_t>(rm_);
- }
- default:
- // Can't get here.
- LOG(FATAL) << "Invalid shifter operand for ARM";
- return 0;
- }
-}
-
-uint32_t ShifterOperand::encodingThumb() const {
- switch (type_) {
- case kImmediate:
- return immed_;
- case kRegister:
- if (is_shift_) {
- // Shifted immediate or register.
- if (rs_ == kNoRegister) {
- // Immediate shift.
- if (shift_ == RRX) {
- DCHECK_EQ(immed_, 0u);
- // RRX is encoded as an ROR with imm 0.
- return ROR << 4 | static_cast<uint32_t>(rm_);
- } else {
- DCHECK((1 <= immed_ && immed_ <= 31) ||
- (immed_ == 0u && shift_ == LSL) ||
- (immed_ == 32u && (shift_ == ASR || shift_ == LSR)));
- uint32_t imm3 = (immed_ >> 2) & 7 /* 0b111*/;
- uint32_t imm2 = immed_ & 3U /* 0b11 */;
-
- return imm3 << 12 | imm2 << 6 | shift_ << 4 |
- static_cast<uint32_t>(rm_);
- }
- } else {
- LOG(FATAL) << "No register-shifted register instruction available in thumb";
- return 0;
- }
- } else {
- // Simple register
- return static_cast<uint32_t>(rm_);
- }
- default:
- // Can't get here.
- LOG(FATAL) << "Invalid shifter operand for thumb";
- UNREACHABLE();
- }
-}
-
-uint32_t Address::encodingArm() const {
- CHECK(IsAbsoluteUint<12>(offset_));
- uint32_t encoding;
- if (is_immed_offset_) {
- if (offset_ < 0) {
- encoding = (am_ ^ (1 << kUShift)) | -offset_; // Flip U to adjust sign.
- } else {
- encoding = am_ | offset_;
- }
- } else {
- uint32_t shift = shift_;
- if (shift == RRX) {
- CHECK_EQ(offset_, 0);
- shift = ROR;
- }
- encoding = am_ | static_cast<uint32_t>(rm_) | shift << 5 | offset_ << 7 | B25;
- }
- encoding |= static_cast<uint32_t>(rn_) << kRnShift;
- return encoding;
-}
-
-
-uint32_t Address::encodingThumb(bool is_32bit) const {
- uint32_t encoding = 0;
- if (is_immed_offset_) {
- encoding = static_cast<uint32_t>(rn_) << 16;
- // Check for the T3/T4 encoding.
- // The addressing mode (PUW) must be Offset for the T3 encoding.
- // Convert ARM PU0W to PUW
- // The Mode is in ARM encoding format which is:
- // |P|U|0|W|
- // we need this in thumb2 mode:
- // |P|U|W|
-
- uint32_t am = am_;
- int32_t offset = offset_;
- if (offset < 0) {
- am ^= 1 << kUShift;
- offset = -offset;
- }
- if (offset_ < 0 || (offset >= 0 && offset < 256 &&
- am_ != Mode::Offset)) {
- // T4 encoding.
- uint32_t PUW = am >> 21; // Move down to bottom of word.
- PUW = (PUW >> 1) | (PUW & 1); // Bits 3, 2 and 0.
- // If P is 0 then W must be 1 (Different from ARM).
- if ((PUW & 4U /* 0b100 */) == 0) {
- PUW |= 1U /* 0b1 */;
- }
- encoding |= B11 | PUW << 8 | offset;
- } else {
- // T3 encoding (also sets op1 to 0b01).
- encoding |= B23 | offset_;
- }
- } else {
- // Register offset, possibly shifted.
- // Need to choose between encoding T1 (16 bit) or T2.
- // Only Offset mode is supported. Shift must be LSL and the count
- // is only 2 bits.
- CHECK_EQ(shift_, LSL);
- CHECK_LE(offset_, 4);
- CHECK_EQ(am_, Offset);
- bool is_t2 = is_32bit;
- if (ArmAssembler::IsHighRegister(rn_) || ArmAssembler::IsHighRegister(rm_)) {
- is_t2 = true;
- } else if (offset_ != 0) {
- is_t2 = true;
- }
- if (is_t2) {
- encoding = static_cast<uint32_t>(rn_) << 16 | static_cast<uint32_t>(rm_) |
- offset_ << 4;
- } else {
- encoding = static_cast<uint32_t>(rn_) << 3 | static_cast<uint32_t>(rm_) << 6;
- }
- }
- return encoding;
-}
-
-// This is very like the ARM encoding except the offset is 10 bits.
-uint32_t Address::encodingThumbLdrdStrd() const {
- DCHECK(IsImmediate());
- uint32_t encoding;
- uint32_t am = am_;
- // If P is 0 then W must be 1 (Different from ARM).
- uint32_t PU1W = am_ >> 21; // Move down to bottom of word.
- if ((PU1W & 8U /* 0b1000 */) == 0) {
- am |= 1 << 21; // Set W bit.
- }
- if (offset_ < 0) {
- int32_t off = -offset_;
- CHECK_LT(off, 1024);
- CHECK_ALIGNED(off, 4);
- encoding = (am ^ (1 << kUShift)) | off >> 2; // Flip U to adjust sign.
- } else {
- CHECK_LT(offset_, 1024);
- CHECK_ALIGNED(offset_, 4);
- encoding = am | offset_ >> 2;
- }
- encoding |= static_cast<uint32_t>(rn_) << 16;
- return encoding;
-}
-
-// Encoding for ARM addressing mode 3.
-uint32_t Address::encoding3() const {
- const uint32_t offset_mask = (1 << 12) - 1;
- uint32_t encoding = encodingArm();
- uint32_t offset = encoding & offset_mask;
- CHECK_LT(offset, 256u);
- return (encoding & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
-}
-
-// Encoding for vfp load/store addressing.
-uint32_t Address::vencoding() const {
- CHECK(IsAbsoluteUint<10>(offset_)); // In the range -1020 to +1020.
- CHECK_ALIGNED(offset_, 2); // Multiple of 4.
-
- const uint32_t offset_mask = (1 << 12) - 1;
- uint32_t encoding = encodingArm();
- uint32_t offset = encoding & offset_mask;
- CHECK((am_ == Offset) || (am_ == NegOffset));
- uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
- if (am_ == Offset) {
- vencoding_value |= 1 << 23;
- }
- return vencoding_value;
-}
-
-
-bool Address::CanHoldLoadOffsetArm(LoadOperandType type, int offset) {
- switch (type) {
- case kLoadSignedByte:
- case kLoadSignedHalfword:
- case kLoadUnsignedHalfword:
- case kLoadWordPair:
- return IsAbsoluteUint<8>(offset); // Addressing mode 3.
- case kLoadUnsignedByte:
- case kLoadWord:
- return IsAbsoluteUint<12>(offset); // Addressing mode 2.
- case kLoadSWord:
- case kLoadDWord:
- return IsAbsoluteUint<10>(offset); // VFP addressing mode.
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-
-bool Address::CanHoldStoreOffsetArm(StoreOperandType type, int offset) {
- switch (type) {
- case kStoreHalfword:
- case kStoreWordPair:
- return IsAbsoluteUint<8>(offset); // Addressing mode 3.
- case kStoreByte:
- case kStoreWord:
- return IsAbsoluteUint<12>(offset); // Addressing mode 2.
- case kStoreSWord:
- case kStoreDWord:
- return IsAbsoluteUint<10>(offset); // VFP addressing mode.
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
- switch (type) {
- case kLoadSignedByte:
- case kLoadSignedHalfword:
- case kLoadUnsignedHalfword:
- case kLoadUnsignedByte:
- case kLoadWord:
- return IsAbsoluteUint<12>(offset);
- case kLoadSWord:
- case kLoadDWord:
- return IsAbsoluteUint<10>(offset) && (offset & 3) == 0; // VFP addressing mode.
- case kLoadWordPair:
- return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-
-bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
- switch (type) {
- case kStoreHalfword:
- case kStoreByte:
- case kStoreWord:
- return IsAbsoluteUint<12>(offset);
- case kStoreSWord:
- case kStoreDWord:
- return IsAbsoluteUint<10>(offset) && (offset & 3) == 0; // VFP addressing mode.
- case kStoreWordPair:
- return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-void ArmAssembler::Pad(uint32_t bytes) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- for (uint32_t i = 0; i < bytes; ++i) {
- buffer_.Emit<uint8_t>(0);
- }
-}
-
-static int LeadingZeros(uint32_t val) {
- uint32_t alt;
- int32_t n;
- int32_t count;
-
- count = 16;
- n = 32;
- do {
- alt = val >> count;
- if (alt != 0) {
- n = n - count;
- val = alt;
- }
- count >>= 1;
- } while (count);
- return n - val;
-}
-
-
-uint32_t ArmAssembler::ModifiedImmediate(uint32_t value) {
- int32_t z_leading;
- int32_t z_trailing;
- uint32_t b0 = value & 0xff;
-
- /* Note: case of value==0 must use 0:000:0:0000000 encoding */
- if (value <= 0xFF)
- return b0; // 0:000:a:bcdefgh.
- if (value == ((b0 << 16) | b0))
- return (0x1 << 12) | b0; /* 0:001:a:bcdefgh */
- if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
- return (0x3 << 12) | b0; /* 0:011:a:bcdefgh */
- b0 = (value >> 8) & 0xff;
- if (value == ((b0 << 24) | (b0 << 8)))
- return (0x2 << 12) | b0; /* 0:010:a:bcdefgh */
- /* Can we do it with rotation? */
- z_leading = LeadingZeros(value);
- z_trailing = 32 - LeadingZeros(~value & (value - 1));
- /* A run of eight or fewer active bits? */
- if ((z_leading + z_trailing) < 24)
- return kInvalidModifiedImmediate; /* No - bail */
- /* left-justify the constant, discarding msb (known to be 1) */
- value <<= z_leading + 1;
- /* Create bcdefgh */
- value >>= 25;
-
- /* Put it all together */
- uint32_t v = 8 + z_leading;
-
- uint32_t i = (v & 16U /* 0b10000 */) >> 4;
- uint32_t imm3 = (v >> 1) & 7U /* 0b111 */;
- uint32_t a = v & 1;
- return value | i << 26 | imm3 << 12 | a << 7;
-}
-
-void ArmAssembler::FinalizeTrackedLabels() {
- if (!tracked_labels_.empty()) {
- // This array should be sorted, as assembly is generated in linearized order. It isn't
- // technically required, but GetAdjustedPosition() used in AdjustLabelPosition() can take
- // advantage of it. So ensure that it's actually the case.
- DCHECK(std::is_sorted(
- tracked_labels_.begin(),
- tracked_labels_.end(),
- [](const Label* lhs, const Label* rhs) { return lhs->Position() < rhs->Position(); }));
-
- Label* last_label = nullptr; // Track duplicates, we must not adjust twice.
- for (Label* label : tracked_labels_) {
- DCHECK_NE(label, last_label);
- AdjustLabelPosition(label);
- last_label = label;
- }
- }
-}
-
-} // namespace arm
-} // namespace art
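
For reference, the encodability rules implemented by the ModifiedImmediate()
function deleted above boil down to the checks in this small standalone sketch.
It uses compiler builtins for brevity and is an illustration, not the removed
code verbatim.

// A value fits a Thumb-2 modified immediate if it is an 8-bit value, a byte
// replicated across both halfwords, across the upper byte of each halfword,
// or across all four bytes, or if its significant bits span at most 8 bits
// (the rotated 8-bit form).
#include <cstdint>

bool IsThumb2ModifiedImmediate(uint32_t value) {
  uint32_t b0 = value & 0xff;
  if (value <= 0xff) return true;                                        // 0:000:a:bcdefgh
  if (value == ((b0 << 16) | b0)) return true;                           // 0:001:a:bcdefgh
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0)) return true;  // 0:011:a:bcdefgh
  uint32_t b1 = (value >> 8) & 0xff;
  if (value == ((b1 << 24) | (b1 << 8))) return true;                    // 0:010:a:bcdefgh
  // Rotated form: the run from the highest to the lowest set bit must be
  // eight bits or fewer (value is non-zero here, so the builtins are safe).
  return (__builtin_clz(value) + __builtin_ctz(value)) >= 24;
}
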
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
deleted file mode 100644
index bb23a29..0000000
--- a/compiler/utils/arm/assembler_arm.h
+++ /dev/null
@@ -1,942 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
-#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
-
-#include <type_traits>
-#include <vector>
-
-#include "base/arena_allocator.h"
-#include "base/arena_containers.h"
-#include "base/bit_utils.h"
-#include "base/enums.h"
-#include "base/logging.h"
-#include "base/stl_util_identity.h"
-#include "base/value_object.h"
-#include "constants_arm.h"
-#include "utils/arm/assembler_arm_shared.h"
-#include "utils/arm/managed_register_arm.h"
-#include "utils/assembler.h"
-#include "utils/jni_macro_assembler.h"
-#include "offsets.h"
-
-namespace art {
-namespace arm {
-
-class Thumb2Assembler;
-
-// Assembler literal is a value embedded in code, retrieved using a PC-relative load.
-class Literal {
- public:
- static constexpr size_t kMaxSize = 8;
-
- Literal(uint32_t size, const uint8_t* data)
- : label_(), size_(size) {
- DCHECK_LE(size, Literal::kMaxSize);
- memcpy(data_, data, size);
- }
-
- template <typename T>
- T GetValue() const {
- DCHECK_EQ(size_, sizeof(T));
- T value;
- memcpy(&value, data_, sizeof(T));
- return value;
- }
-
- uint32_t GetSize() const {
- return size_;
- }
-
- const uint8_t* GetData() const {
- return data_;
- }
-
- Label* GetLabel() {
- return &label_;
- }
-
- const Label* GetLabel() const {
- return &label_;
- }
-
- private:
- Label label_;
- const uint32_t size_;
- uint8_t data_[kMaxSize];
-
- DISALLOW_COPY_AND_ASSIGN(Literal);
-};
-
-// Jump table: table of labels emitted after the literals. Similar to literals.
-class JumpTable {
- public:
- explicit JumpTable(std::vector<Label*>&& labels)
- : label_(), anchor_label_(), labels_(std::move(labels)) {
- }
-
- uint32_t GetSize() const {
- return static_cast<uint32_t>(labels_.size()) * sizeof(uint32_t);
- }
-
- const std::vector<Label*>& GetData() const {
- return labels_;
- }
-
- Label* GetLabel() {
- return &label_;
- }
-
- const Label* GetLabel() const {
- return &label_;
- }
-
- Label* GetAnchorLabel() {
- return &anchor_label_;
- }
-
- const Label* GetAnchorLabel() const {
- return &anchor_label_;
- }
-
- private:
- Label label_;
- Label anchor_label_;
- std::vector<Label*> labels_;
-
- DISALLOW_COPY_AND_ASSIGN(JumpTable);
-};
-
-class ShifterOperand {
- public:
- ShifterOperand() : type_(kUnknown), rm_(kNoRegister), rs_(kNoRegister),
- is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(0) {
- }
-
- explicit ShifterOperand(uint32_t immed);
-
- // Data-processing operands - Register
- explicit ShifterOperand(Register rm) : type_(kRegister), rm_(rm), rs_(kNoRegister),
- is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(0) {
- }
-
- ShifterOperand(uint32_t rotate, uint32_t immed8) : type_(kImmediate), rm_(kNoRegister),
- rs_(kNoRegister),
- is_rotate_(true), is_shift_(false), shift_(kNoShift), rotate_(rotate), immed_(immed8) {
- }
-
- ShifterOperand(Register rm, Shift shift, uint32_t shift_imm = 0) : type_(kRegister), rm_(rm),
- rs_(kNoRegister),
- is_rotate_(false), is_shift_(true), shift_(shift), rotate_(0), immed_(shift_imm) {
- }
-
- // Data-processing operands - Logical shift/rotate by register
- ShifterOperand(Register rm, Shift shift, Register rs) : type_(kRegister), rm_(rm),
- rs_(rs),
- is_rotate_(false), is_shift_(true), shift_(shift), rotate_(0), immed_(0) {
- }
-
- bool is_valid() const { return (type_ == kImmediate) || (type_ == kRegister); }
-
- uint32_t type() const {
- CHECK(is_valid());
- return type_;
- }
-
- uint32_t encodingArm() const;
- uint32_t encodingThumb() const;
-
- bool IsEmpty() const {
- return type_ == kUnknown;
- }
-
- bool IsImmediate() const {
- return type_ == kImmediate;
- }
-
- bool IsRegister() const {
- return type_ == kRegister;
- }
-
- bool IsShift() const {
- return is_shift_;
- }
-
- uint32_t GetImmediate() const {
- return immed_;
- }
-
- Shift GetShift() const {
- return shift_;
- }
-
- Register GetRegister() const {
- return rm_;
- }
-
- Register GetSecondRegister() const {
- return rs_;
- }
-
- enum Type {
- kUnknown = -1,
- kRegister,
- kImmediate
- };
-
- private:
- Type type_;
- Register rm_;
- Register rs_;
- bool is_rotate_;
- bool is_shift_;
- Shift shift_;
- uint32_t rotate_;
- uint32_t immed_;
-
- friend class Thumb2Assembler;
-
-#ifdef SOURCE_ASSEMBLER_SUPPORT
- friend class BinaryAssembler;
-#endif
-};
-
-// Load/store multiple addressing mode.
-enum BlockAddressMode {
- // bit encoding P U W
- DA = (0|0|0) << 21, // decrement after
- IA = (0|4|0) << 21, // increment after
- DB = (8|0|0) << 21, // decrement before
- IB = (8|4|0) << 21, // increment before
- DA_W = (0|0|1) << 21, // decrement after with writeback to base
- IA_W = (0|4|1) << 21, // increment after with writeback to base
- DB_W = (8|0|1) << 21, // decrement before with writeback to base
- IB_W = (8|4|1) << 21 // increment before with writeback to base
-};
-inline std::ostream& operator<<(std::ostream& os, const BlockAddressMode& rhs) {
- os << static_cast<int>(rhs);
- return os;
-}
-
-class Address : public ValueObject {
- public:
- // Memory operand addressing mode (in ARM encoding form. For others we need
- // to adjust)
- enum Mode {
- // bit encoding P U W
- Offset = (8|4|0) << 21, // offset (w/o writeback to base)
- PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
- PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
- NegOffset = (8|0|0) << 21, // negative offset (w/o writeback to base)
- NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
- NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
- };
-
- explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) : rn_(rn), rm_(R0),
- offset_(offset),
- am_(am), is_immed_offset_(true), shift_(LSL) {
- }
-
- Address(Register rn, Register rm, Mode am = Offset) : rn_(rn), rm_(rm), offset_(0),
- am_(am), is_immed_offset_(false), shift_(LSL) {
- CHECK_NE(rm, PC);
- }
-
- Address(Register rn, Register rm, Shift shift, uint32_t count, Mode am = Offset) :
- rn_(rn), rm_(rm), offset_(count),
- am_(am), is_immed_offset_(false), shift_(shift) {
- CHECK_NE(rm, PC);
- }
-
- static bool CanHoldLoadOffsetArm(LoadOperandType type, int offset);
- static bool CanHoldStoreOffsetArm(StoreOperandType type, int offset);
-
- static bool CanHoldLoadOffsetThumb(LoadOperandType type, int offset);
- static bool CanHoldStoreOffsetThumb(StoreOperandType type, int offset);
-
- uint32_t encodingArm() const;
- uint32_t encodingThumb(bool is_32bit) const;
-
- uint32_t encoding3() const;
- uint32_t vencoding() const;
-
- uint32_t encodingThumbLdrdStrd() const;
-
- Register GetRegister() const {
- return rn_;
- }
-
- Register GetRegisterOffset() const {
- return rm_;
- }
-
- int32_t GetOffset() const {
- return offset_;
- }
-
- Mode GetMode() const {
- return am_;
- }
-
- bool IsImmediate() const {
- return is_immed_offset_;
- }
-
- Shift GetShift() const {
- return shift_;
- }
-
- int32_t GetShiftCount() const {
- CHECK(!is_immed_offset_);
- return offset_;
- }
-
- private:
- const Register rn_;
- const Register rm_;
- const int32_t offset_; // Used as shift amount for register offset.
- const Mode am_;
- const bool is_immed_offset_;
- const Shift shift_;
-};
-inline std::ostream& operator<<(std::ostream& os, const Address::Mode& rhs) {
- os << static_cast<int>(rhs);
- return os;
-}
-
-// Instruction encoding bits.
-enum {
- H = 1 << 5, // halfword (or byte)
- L = 1 << 20, // load (or store)
- S = 1 << 20, // set condition code (or leave unchanged)
- W = 1 << 21, // writeback base register (or leave unchanged)
- A = 1 << 21, // accumulate in multiply instruction (or not)
- B = 1 << 22, // unsigned byte (or word)
- N = 1 << 22, // long (or short)
- U = 1 << 23, // positive (or negative) offset/index
- P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
- I = 1 << 25, // immediate shifter operand (or not)
-
- B0 = 1,
- B1 = 1 << 1,
- B2 = 1 << 2,
- B3 = 1 << 3,
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B10 = 1 << 10,
- B11 = 1 << 11,
- B12 = 1 << 12,
- B13 = 1 << 13,
- B14 = 1 << 14,
- B15 = 1 << 15,
- B16 = 1 << 16,
- B17 = 1 << 17,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
- B28 = 1 << 28,
- B29 = 1 << 29,
- B30 = 1 << 30,
- B31 = 1 << 31,
-
- // Instruction bit masks.
- RdMask = 15 << 12, // in str instruction
- CondMask = 15 << 28,
- CoprocessorMask = 15 << 8,
- OpCodeMask = 15 << 21, // in data-processing instructions
- Imm24Mask = (1 << 24) - 1,
- Off12Mask = (1 << 12) - 1,
-
- // ldrex/strex register field encodings.
- kLdExRnShift = 16,
- kLdExRtShift = 12,
- kStrExRnShift = 16,
- kStrExRdShift = 12,
- kStrExRtShift = 0,
-};
-
-// IfThen state for IT instructions.
-enum ItState {
- kItOmitted,
- kItThen,
- kItT = kItThen,
- kItElse,
- kItE = kItElse
-};
-
-constexpr uint32_t kNoItCondition = 3;
-constexpr uint32_t kInvalidModifiedImmediate = -1;
-
-extern const char* kRegisterNames[];
-extern const char* kConditionNames[];
-
-// This is an abstract ARM assembler. Subclasses provide assemblers for the individual
-// instruction sets (ARM32, Thumb2, etc.)
-//
-class ArmAssembler : public Assembler {
- public:
- virtual ~ArmAssembler() {}
-
- // Is this assembler for the thumb instruction set?
- virtual bool IsThumb() const = 0;
-
- // Data-processing instructions.
- virtual void and_(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void ands(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- and_(rd, rn, so, cond, kCcSet);
- }
-
- virtual void eor(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void eors(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- eor(rd, rn, so, cond, kCcSet);
- }
-
- virtual void sub(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void subs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- sub(rd, rn, so, cond, kCcSet);
- }
-
- virtual void rsb(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void rsbs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- rsb(rd, rn, so, cond, kCcSet);
- }
-
- virtual void add(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void adds(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- add(rd, rn, so, cond, kCcSet);
- }
-
- virtual void adc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void adcs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- adc(rd, rn, so, cond, kCcSet);
- }
-
- virtual void sbc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void sbcs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- sbc(rd, rn, so, cond, kCcSet);
- }
-
- virtual void rsc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void rscs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- rsc(rd, rn, so, cond, kCcSet);
- }
-
- virtual void tst(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
-
- virtual void teq(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
-
- virtual void cmp(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
-
- // Note: CMN updates flags based on addition of its operands. Do not confuse
- // the "N" suffix with bitwise inversion performed by MVN.
- virtual void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
-
- virtual void orr(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void orrs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- orr(rd, rn, so, cond, kCcSet);
- }
-
- virtual void orn(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void orns(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- orn(rd, rn, so, cond, kCcSet);
- }
-
- virtual void mov(Register rd, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void movs(Register rd, const ShifterOperand& so, Condition cond = AL) {
- mov(rd, so, cond, kCcSet);
- }
-
- virtual void bic(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void bics(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
- bic(rd, rn, so, cond, kCcSet);
- }
-
- virtual void mvn(Register rd, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- virtual void mvns(Register rd, const ShifterOperand& so, Condition cond = AL) {
- mvn(rd, so, cond, kCcSet);
- }
-
- // Miscellaneous data-processing instructions.
- virtual void clz(Register rd, Register rm, Condition cond = AL) = 0;
- virtual void movw(Register rd, uint16_t imm16, Condition cond = AL) = 0;
- virtual void movt(Register rd, uint16_t imm16, Condition cond = AL) = 0;
- virtual void rbit(Register rd, Register rm, Condition cond = AL) = 0;
- virtual void rev(Register rd, Register rm, Condition cond = AL) = 0;
- virtual void rev16(Register rd, Register rm, Condition cond = AL) = 0;
- virtual void revsh(Register rd, Register rm, Condition cond = AL) = 0;
-
- // Multiply instructions.
- virtual void mul(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
- virtual void mla(Register rd, Register rn, Register rm, Register ra,
- Condition cond = AL) = 0;
- virtual void mls(Register rd, Register rn, Register rm, Register ra,
- Condition cond = AL) = 0;
- virtual void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
- Condition cond = AL) = 0;
- virtual void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
- Condition cond = AL) = 0;
-
- virtual void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
- virtual void udiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
-
- // Bit field extract instructions.
- virtual void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width,
- Condition cond = AL) = 0;
- virtual void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width,
- Condition cond = AL) = 0;
-
- // Load/store instructions.
- virtual void ldr(Register rd, const Address& ad, Condition cond = AL) = 0;
- virtual void str(Register rd, const Address& ad, Condition cond = AL) = 0;
-
- virtual void ldrb(Register rd, const Address& ad, Condition cond = AL) = 0;
- virtual void strb(Register rd, const Address& ad, Condition cond = AL) = 0;
-
- virtual void ldrh(Register rd, const Address& ad, Condition cond = AL) = 0;
- virtual void strh(Register rd, const Address& ad, Condition cond = AL) = 0;
-
- virtual void ldrsb(Register rd, const Address& ad, Condition cond = AL) = 0;
- virtual void ldrsh(Register rd, const Address& ad, Condition cond = AL) = 0;
-
- virtual void ldrd(Register rd, const Address& ad, Condition cond = AL) = 0;
- virtual void strd(Register rd, const Address& ad, Condition cond = AL) = 0;
-
- virtual void ldm(BlockAddressMode am, Register base,
- RegList regs, Condition cond = AL) = 0;
- virtual void stm(BlockAddressMode am, Register base,
- RegList regs, Condition cond = AL) = 0;
-
- virtual void ldrex(Register rd, Register rn, Condition cond = AL) = 0;
- virtual void strex(Register rd, Register rt, Register rn, Condition cond = AL) = 0;
- virtual void ldrexd(Register rt, Register rt2, Register rn, Condition cond = AL) = 0;
- virtual void strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond = AL) = 0;
-
- // Miscellaneous instructions.
- virtual void clrex(Condition cond = AL) = 0;
- virtual void nop(Condition cond = AL) = 0;
-
- // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
- virtual void bkpt(uint16_t imm16) = 0;
- virtual void svc(uint32_t imm24) = 0;
-
- virtual void it(Condition firstcond ATTRIBUTE_UNUSED,
- ItState i1 ATTRIBUTE_UNUSED = kItOmitted,
- ItState i2 ATTRIBUTE_UNUSED = kItOmitted,
- ItState i3 ATTRIBUTE_UNUSED = kItOmitted) {
- // Ignored if not supported.
- }
-
- virtual void cbz(Register rn, Label* target) = 0;
- virtual void cbnz(Register rn, Label* target) = 0;
-
- // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
- virtual void vmovsr(SRegister sn, Register rt, Condition cond = AL) = 0;
- virtual void vmovrs(Register rt, SRegister sn, Condition cond = AL) = 0;
- virtual void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL) = 0;
- virtual void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL) = 0;
- virtual void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL) = 0;
- virtual void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL) = 0;
- virtual void vmovs(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vmovd(DRegister dd, DRegister dm, Condition cond = AL) = 0;
-
- // Returns false if the immediate cannot be encoded.
- virtual bool vmovs(SRegister sd, float s_imm, Condition cond = AL) = 0;
- virtual bool vmovd(DRegister dd, double d_imm, Condition cond = AL) = 0;
-
- virtual void vldrs(SRegister sd, const Address& ad, Condition cond = AL) = 0;
- virtual void vstrs(SRegister sd, const Address& ad, Condition cond = AL) = 0;
- virtual void vldrd(DRegister dd, const Address& ad, Condition cond = AL) = 0;
- virtual void vstrd(DRegister dd, const Address& ad, Condition cond = AL) = 0;
-
- virtual void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) = 0;
- virtual void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) = 0;
- virtual void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) = 0;
- virtual void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) = 0;
- virtual void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) = 0;
- virtual void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) = 0;
- virtual void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) = 0;
- virtual void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) = 0;
- virtual void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) = 0;
- virtual void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) = 0;
- virtual void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) = 0;
- virtual void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) = 0;
-
- virtual void vabss(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vabsd(DRegister dd, DRegister dm, Condition cond = AL) = 0;
- virtual void vnegs(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vnegd(DRegister dd, DRegister dm, Condition cond = AL) = 0;
- virtual void vsqrts(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL) = 0;
-
- virtual void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL) = 0;
- virtual void vcvtds(DRegister dd, SRegister sm, Condition cond = AL) = 0;
- virtual void vcvtis(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vcvtid(SRegister sd, DRegister dm, Condition cond = AL) = 0;
- virtual void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL) = 0;
- virtual void vcvtus(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vcvtud(SRegister sd, DRegister dm, Condition cond = AL) = 0;
- virtual void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL) = 0;
-
- virtual void vcmps(SRegister sd, SRegister sm, Condition cond = AL) = 0;
- virtual void vcmpd(DRegister dd, DRegister dm, Condition cond = AL) = 0;
- virtual void vcmpsz(SRegister sd, Condition cond = AL) = 0;
- virtual void vcmpdz(DRegister dd, Condition cond = AL) = 0;
- virtual void vmstat(Condition cond = AL) = 0; // VMRS APSR_nzcv, FPSCR
-
- virtual void vcntd(DRegister dd, DRegister dm) = 0;
- virtual void vpaddld(DRegister dd, DRegister dm, int32_t size, bool is_unsigned) = 0;
-
- virtual void vpushs(SRegister reg, int nregs, Condition cond = AL) = 0;
- virtual void vpushd(DRegister reg, int nregs, Condition cond = AL) = 0;
- virtual void vpops(SRegister reg, int nregs, Condition cond = AL) = 0;
- virtual void vpopd(DRegister reg, int nregs, Condition cond = AL) = 0;
- virtual void vldmiad(Register base_reg, DRegister reg, int nregs, Condition cond = AL) = 0;
- virtual void vstmiad(Register base_reg, DRegister reg, int nregs, Condition cond = AL) = 0;
-
- // Branch instructions.
- virtual void b(Label* label, Condition cond = AL) = 0;
- virtual void bl(Label* label, Condition cond = AL) = 0;
- virtual void blx(Register rm, Condition cond = AL) = 0;
- virtual void bx(Register rm, Condition cond = AL) = 0;
-
- // ADR instruction loading register for branching to the label.
- virtual void AdrCode(Register rt, Label* label) = 0;
-
- // Memory barriers.
- virtual void dmb(DmbOptions flavor) = 0;
-
- void Pad(uint32_t bytes);
-
- // Adjust label position.
- void AdjustLabelPosition(Label* label) {
- DCHECK(label->IsBound());
- uint32_t old_position = static_cast<uint32_t>(label->Position());
- uint32_t new_position = GetAdjustedPosition(old_position);
- label->Reinitialize();
- DCHECK_GE(static_cast<int>(new_position), 0);
- label->BindTo(static_cast<int>(new_position));
- }
-
- // Get the final position of a label after local fixup based on the old position
- // recorded before FinalizeCode().
- virtual uint32_t GetAdjustedPosition(uint32_t old_position) = 0;
-
- // Macros.
- // Most of these are pure virtual as they need to be implemented per instruction set.
-
- // Create a new literal with a given value.
- // NOTE: Force the template parameter to be explicitly specified.
- template <typename T>
- Literal* NewLiteral(typename Identity<T>::type value) {
- static_assert(std::is_integral<T>::value, "T must be an integral type.");
- return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
- }
-
- // Create a new literal with the given data.
- virtual Literal* NewLiteral(size_t size, const uint8_t* data) = 0;
-
- // Load literal.
- virtual void LoadLiteral(Register rt, Literal* literal) = 0;
- virtual void LoadLiteral(Register rt, Register rt2, Literal* literal) = 0;
- virtual void LoadLiteral(SRegister sd, Literal* literal) = 0;
- virtual void LoadLiteral(DRegister dd, Literal* literal) = 0;
-
- // Add signed constant value to rd. May clobber IP.
- virtual void AddConstant(Register rd, Register rn, int32_t value,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
- void AddConstantSetFlags(Register rd, Register rn, int32_t value, Condition cond = AL) {
- AddConstant(rd, rn, value, cond, kCcSet);
- }
- void AddConstant(Register rd, int32_t value, Condition cond = AL, SetCc set_cc = kCcDontCare) {
- AddConstant(rd, rd, value, cond, set_cc);
- }
-
- virtual void CmpConstant(Register rn, int32_t value, Condition cond = AL) = 0;
-
- // Load and Store. May clobber IP.
- virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {
- if (!vmovs(sd, value, cond)) {
- int32_t int_value = bit_cast<int32_t, float>(value);
- if (int_value == bit_cast<int32_t, float>(0.0f)) {
- // 0.0 is quite common, so we special-case it by loading
- // 2.0 in `sd` and then subtracting it.
- bool success = vmovs(sd, 2.0, cond);
- CHECK(success);
- vsubs(sd, sd, sd, cond);
- } else {
- LoadImmediate(IP, int_value, cond);
- vmovsr(sd, IP, cond);
- }
- }
- }
-
- virtual void LoadDImmediate(DRegister dd, double value, Condition cond = AL) = 0;
-
- virtual void MarkExceptionHandler(Label* label) = 0;
- virtual void LoadFromOffset(LoadOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond = AL) = 0;
- virtual void StoreToOffset(StoreOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond = AL) = 0;
- virtual void LoadSFromOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) = 0;
- virtual void StoreSToOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) = 0;
- virtual void LoadDFromOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) = 0;
- virtual void StoreDToOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) = 0;
-
- virtual void Push(Register rd, Condition cond = AL) = 0;
- virtual void Pop(Register rd, Condition cond = AL) = 0;
-
- virtual void PushList(RegList regs, Condition cond = AL) = 0;
- virtual void PopList(RegList regs, Condition cond = AL) = 0;
-
- virtual void StoreList(RegList regs, size_t stack_offset) = 0;
- virtual void LoadList(RegList regs, size_t stack_offset) = 0;
-
- virtual void Mov(Register rd, Register rm, Condition cond = AL) = 0;
-
- // Convenience shift instructions. Use mov instruction with shifter operand
- // for variants setting the status flags or using a register shift count.
- virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Lsls(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
- Lsl(rd, rm, shift_imm, cond, kCcSet);
- }
-
- virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Lsrs(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
- Lsr(rd, rm, shift_imm, cond, kCcSet);
- }
-
- virtual void Asr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Asrs(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
- Asr(rd, rm, shift_imm, cond, kCcSet);
- }
-
- virtual void Ror(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Rors(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
- Ror(rd, rm, shift_imm, cond, kCcSet);
- }
-
- virtual void Rrx(Register rd, Register rm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Rrxs(Register rd, Register rm, Condition cond = AL) {
- Rrx(rd, rm, cond, kCcSet);
- }
-
- virtual void Lsl(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Lsls(Register rd, Register rm, Register rn, Condition cond = AL) {
- Lsl(rd, rm, rn, cond, kCcSet);
- }
-
- virtual void Lsr(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Lsrs(Register rd, Register rm, Register rn, Condition cond = AL) {
- Lsr(rd, rm, rn, cond, kCcSet);
- }
-
- virtual void Asr(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Asrs(Register rd, Register rm, Register rn, Condition cond = AL) {
- Asr(rd, rm, rn, cond, kCcSet);
- }
-
- virtual void Ror(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
-
- void Rors(Register rd, Register rm, Register rn, Condition cond = AL) {
- Ror(rd, rm, rn, cond, kCcSet);
- }
-
- // Returns whether the `immediate` can fit in a `ShifterOperand`. If yes,
- // `shifter_op` contains the operand.
- virtual bool ShifterOperandCanHold(Register rd,
- Register rn,
- Opcode opcode,
- uint32_t immediate,
- SetCc set_cc,
- ShifterOperand* shifter_op) = 0;
- bool ShifterOperandCanHold(Register rd,
- Register rn,
- Opcode opcode,
- uint32_t immediate,
- ShifterOperand* shifter_op) {
- return ShifterOperandCanHold(rd, rn, opcode, immediate, kCcDontCare, shifter_op);
- }
-
- virtual bool ShifterOperandCanAlwaysHold(uint32_t immediate) = 0;
-
- static bool IsInstructionForExceptionHandling(uintptr_t pc);
-
- virtual void CompareAndBranchIfZero(Register r, Label* label) = 0;
- virtual void CompareAndBranchIfNonZero(Register r, Label* label) = 0;
-
- static uint32_t ModifiedImmediate(uint32_t value);
-
- static bool IsLowRegister(Register r) {
- return r < R8;
- }
-
- static bool IsHighRegister(Register r) {
- return r >= R8;
- }
-
- //
- // Heap poisoning.
- //
-
- // Poison a heap reference contained in `reg`.
- void PoisonHeapReference(Register reg) {
- // reg = -reg.
- rsb(reg, reg, ShifterOperand(0));
- }
- // Unpoison a heap reference contained in `reg`.
- void UnpoisonHeapReference(Register reg) {
- // reg = -reg.
- rsb(reg, reg, ShifterOperand(0));
- }
- // Poison a heap reference contained in `reg` if heap poisoning is enabled.
- void MaybePoisonHeapReference(Register reg) {
- if (kPoisonHeapReferences) {
- PoisonHeapReference(reg);
- }
- }
- // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
- void MaybeUnpoisonHeapReference(Register reg) {
- if (kPoisonHeapReferences) {
- UnpoisonHeapReference(reg);
- }
- }
-
- void Jump(Label* label) OVERRIDE {
- b(label);
- }
-
- // Jump table support. This is split into three functions:
- //
- // * CreateJumpTable creates the internal metadata to track the jump targets, and emits code to
- // load the base address of the jump table.
- //
- // * EmitJumpTableDispatch emits the code to actually jump, assuming that the right table value
- // has been loaded into a register already.
- //
- // * FinalizeTables emits the jump table into the literal pool. This can only be called after the
- // labels for the jump targets have been finalized.
-
- // Create a jump table for the given labels that will be emitted when finalizing. Create a load
- // sequence (or placeholder) that stores the base address into the given register. When the table
- // is emitted, offsets will be relative to the location EmitJumpTableDispatch was called on (the
- // anchor).
- virtual JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) = 0;
-
- // Emit the jump-table jump, assuming that the right value was loaded into displacement_reg.
- virtual void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) = 0;
-
- // Bind a Label that needs to be updated by the assembler in FinalizeCode() if its position
- // changes due to branch/literal fixup.
- void BindTrackedLabel(Label* label) {
- Bind(label);
- tracked_labels_.push_back(label);
- }
-
- protected:
- explicit ArmAssembler(ArenaAllocator* arena)
- : Assembler(arena), tracked_labels_(arena->Adapter(kArenaAllocAssembler)) {}
-
- // Three-way comparison of two registers, used for sorting register lists.
- static int RegisterCompare(const Register* reg1, const Register* reg2) {
- return *reg1 - *reg2;
- }
-
- void FinalizeTrackedLabels();
-
- // Tracked labels. Use a vector, as we need to sort before adjusting.
- ArenaVector<Label*> tracked_labels_;
-};
-
-} // namespace arm
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
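
For orientation only: calls against the ArmAssembler interface deleted above
now go through the VIXL-based assembler that stays in the tree
(assembler_arm_vixl.cc in the srcs list). A rough, assumption-laden sketch of
the correspondence, written against the raw VIXL AArch32 API rather than ART's
wrappers; the old-interface calls in the comments are only examples.

// Illustrative mapping, not code from this change. The vixl32 names below
// come from the VIXL AArch32 API; ART's own VIXL wrapper adds register
// conversions and helpers on top of this.
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl::aarch32;

void Sketch(MacroAssembler* masm) {
  // Old: add(R0, R1, ShifterOperand(R2, LSL, 1), AL, kCcSet);
  masm->Adds(r0, r1, Operand(r2, LSL, 1));

  // Old: LoadFromOffset(kLoadWord, R0, SP, 16);
  masm->Ldr(r0, MemOperand(sp, 16));

  // Old: LoadImmediate(R0, 0x12345678);
  masm->Mov(r0, 0x12345678);
}
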
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
deleted file mode 100644
index abc36c6..0000000
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ /dev/null
@@ -1,4076 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <type_traits>
-
-#include "assembler_thumb2.h"
-
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "offsets.h"
-#include "thread.h"
-
-namespace art {
-namespace arm {
-
-template <typename Function>
-void Thumb2Assembler::Fixup::ForExpandableDependencies(Thumb2Assembler* assembler, Function fn) {
- static_assert(
- std::is_same<typename std::result_of<Function(FixupId, FixupId)>::type, void>::value,
- "Incorrect signature for argument `fn`: expected (FixupId, FixupId) -> void");
- Fixup* fixups = assembler->fixups_.data();
- for (FixupId fixup_id = 0u, end_id = assembler->fixups_.size(); fixup_id != end_id; ++fixup_id) {
- uint32_t target = fixups[fixup_id].target_;
- if (target > fixups[fixup_id].location_) {
- for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
- if (fixups[id].CanExpand()) {
- fn(id, fixup_id);
- }
- }
- } else {
- for (FixupId id = fixup_id; id != 0u && fixups[id - 1u].location_ >= target; --id) {
- if (fixups[id - 1u].CanExpand()) {
- fn(id - 1u, fixup_id);
- }
- }
- }
- }
-}
-
-void Thumb2Assembler::Fixup::PrepareDependents(Thumb2Assembler* assembler) {
- // For each Fixup, it's easy to find the Fixups that it depends on as they are either
- // the following or the preceding Fixups until we find the target. However, for fixup
- // adjustment we need the reverse lookup, i.e. what Fixups depend on a given Fixup.
- // This function creates a compact representation of this relationship, where we have
- // all the dependents in a single array and Fixups reference their ranges by start
- // index and count. (Instead of having a per-fixup vector.)
-
- // Count the number of dependents of each Fixup.
- Fixup* fixups = assembler->fixups_.data();
- ForExpandableDependencies(
- assembler,
- [fixups](FixupId dependency, FixupId dependent ATTRIBUTE_UNUSED) {
- fixups[dependency].dependents_count_ += 1u;
- });
- // Assign index ranges in fixup_dependents_ to individual fixups. Record the end of the
- // range in dependents_start_, we shall later decrement it as we fill in fixup_dependents_.
- uint32_t number_of_dependents = 0u;
- for (FixupId fixup_id = 0u, end_id = assembler->fixups_.size(); fixup_id != end_id; ++fixup_id) {
- number_of_dependents += fixups[fixup_id].dependents_count_;
- fixups[fixup_id].dependents_start_ = number_of_dependents;
- }
- if (number_of_dependents == 0u) {
- return;
- }
- // Create and fill in the fixup_dependents_.
- assembler->fixup_dependents_.resize(number_of_dependents);
- FixupId* dependents = assembler->fixup_dependents_.data();
- ForExpandableDependencies(
- assembler,
- [fixups, dependents](FixupId dependency, FixupId dependent) {
- fixups[dependency].dependents_start_ -= 1u;
- dependents[fixups[dependency].dependents_start_] = dependent;
- });
-}
-
-void Thumb2Assembler::BindLabel(Label* label, uint32_t bound_pc) {
- CHECK(!label->IsBound());
-
- while (label->IsLinked()) {
- FixupId fixup_id = label->Position(); // The id for linked Fixup.
- Fixup* fixup = GetFixup(fixup_id); // Get the Fixup at this id.
- fixup->Resolve(bound_pc); // Fixup can be resolved now.
- uint32_t fixup_location = fixup->GetLocation();
- uint16_t next = buffer_.Load<uint16_t>(fixup_location); // Get next in chain.
- buffer_.Store<int16_t>(fixup_location, 0);
- label->position_ = next; // Move to next.
- }
- label->BindTo(bound_pc);
-}
-
-uint32_t Thumb2Assembler::BindLiterals() {
- // We don't add the padding here, that's done only after adjusting the Fixup sizes.
- uint32_t code_size = buffer_.Size();
- for (Literal& lit : literals_) {
- Label* label = lit.GetLabel();
- BindLabel(label, code_size);
- code_size += lit.GetSize();
- }
- return code_size;
-}
-
-void Thumb2Assembler::BindJumpTables(uint32_t code_size) {
- for (JumpTable& table : jump_tables_) {
- Label* label = table.GetLabel();
- BindLabel(label, code_size);
- code_size += table.GetSize();
- }
-}
-
-void Thumb2Assembler::AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_size,
- std::deque<FixupId>* fixups_to_recalculate) {
- uint32_t adjustment = fixup->AdjustSizeIfNeeded(*current_code_size);
- if (adjustment != 0u) {
- DCHECK(fixup->CanExpand());
- *current_code_size += adjustment;
- for (FixupId dependent_id : fixup->Dependents(*this)) {
- Fixup* dependent = GetFixup(dependent_id);
- dependent->IncreaseAdjustment(adjustment);
- if (buffer_.Load<int16_t>(dependent->GetLocation()) == 0) {
- buffer_.Store<int16_t>(dependent->GetLocation(), 1);
- fixups_to_recalculate->push_back(dependent_id);
- }
- }
- }
-}
-
-uint32_t Thumb2Assembler::AdjustFixups() {
- Fixup::PrepareDependents(this);
- uint32_t current_code_size = buffer_.Size();
- std::deque<FixupId> fixups_to_recalculate;
- if (kIsDebugBuild) {
- // We will use the placeholders in the buffer_ to mark whether the fixup has
- // been added to the fixups_to_recalculate. Make sure we start with zeros.
- for (Fixup& fixup : fixups_) {
- CHECK_EQ(buffer_.Load<int16_t>(fixup.GetLocation()), 0);
- }
- }
- for (Fixup& fixup : fixups_) {
- AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
- }
- while (!fixups_to_recalculate.empty()) {
- do {
- // Pop the fixup.
- FixupId fixup_id = fixups_to_recalculate.front();
- fixups_to_recalculate.pop_front();
- Fixup* fixup = GetFixup(fixup_id);
- DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
- buffer_.Store<int16_t>(fixup->GetLocation(), 0);
- // See if it needs adjustment.
- AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
- } while (!fixups_to_recalculate.empty());
-
- if ((current_code_size & 2) != 0 && (!literals_.empty() || !jump_tables_.empty())) {
- // If we need to add padding before literals, this may just push some out of range,
- // so recalculate all load literals. This makes up for the fact that we don't mark
- // load literal as a dependency of all previous Fixups even though it actually is.
- for (Fixup& fixup : fixups_) {
- if (fixup.IsLoadLiteral()) {
- AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
- }
- }
- }
- }
- if (kIsDebugBuild) {
- // Check that no fixup is marked as being in fixups_to_recalculate anymore.
- for (Fixup& fixup : fixups_) {
- CHECK_EQ(buffer_.Load<int16_t>(fixup.GetLocation()), 0);
- }
- }
-
- // Adjust literal pool labels for padding.
- DCHECK_ALIGNED(current_code_size, 2);
- uint32_t literals_adjustment = current_code_size + (current_code_size & 2) - buffer_.Size();
- if (literals_adjustment != 0u) {
- for (Literal& literal : literals_) {
- Label* label = literal.GetLabel();
- DCHECK(label->IsBound());
- int old_position = label->Position();
- label->Reinitialize();
- label->BindTo(old_position + literals_adjustment);
- }
- for (JumpTable& table : jump_tables_) {
- Label* label = table.GetLabel();
- DCHECK(label->IsBound());
- int old_position = label->Position();
- label->Reinitialize();
- label->BindTo(old_position + literals_adjustment);
- }
- }
-
- return current_code_size;
-}
-
-void Thumb2Assembler::EmitFixups(uint32_t adjusted_code_size) {
- // Move non-fixup code to its final place and emit fixups.
- // Process fixups in reverse order so that we don't repeatedly move the same data.
- size_t src_end = buffer_.Size();
- size_t dest_end = adjusted_code_size;
- buffer_.Resize(dest_end);
- DCHECK_GE(dest_end, src_end);
- for (auto i = fixups_.rbegin(), end = fixups_.rend(); i != end; ++i) {
- Fixup* fixup = &*i;
- size_t old_fixup_location = fixup->GetLocation();
- if (fixup->GetOriginalSize() == fixup->GetSize()) {
- // The size of this Fixup didn't change. To avoid moving the data
- // in small chunks, emit the code to its original position.
- fixup->Finalize(dest_end - src_end);
- fixup->Emit(old_fixup_location, &buffer_, adjusted_code_size);
- } else {
- // Move the data between the end of the fixup and src_end to its final location.
- size_t src_begin = old_fixup_location + fixup->GetOriginalSizeInBytes();
- size_t data_size = src_end - src_begin;
- size_t dest_begin = dest_end - data_size;
- buffer_.Move(dest_begin, src_begin, data_size);
- src_end = old_fixup_location;
- dest_end = dest_begin - fixup->GetSizeInBytes();
- // Finalize the Fixup and emit the data to the new location.
- fixup->Finalize(dest_end - src_end);
- fixup->Emit(fixup->GetLocation(), &buffer_, adjusted_code_size);
- }
- }
- CHECK_EQ(src_end, dest_end);
-}
-
-void Thumb2Assembler::EmitLiterals() {
- if (!literals_.empty()) {
- // Load literal instructions (LDR, LDRD, VLDR) require 4-byte alignment.
- // We don't support byte and half-word literals.
- uint32_t code_size = buffer_.Size();
- DCHECK_ALIGNED(code_size, 2);
- if ((code_size & 2u) != 0u) {
- Emit16(0);
- }
- for (Literal& literal : literals_) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- DCHECK_EQ(static_cast<size_t>(literal.GetLabel()->Position()), buffer_.Size());
- DCHECK(literal.GetSize() == 4u || literal.GetSize() == 8u);
- for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
- buffer_.Emit<uint8_t>(literal.GetData()[i]);
- }
- }
- }
-}
-
-void Thumb2Assembler::EmitJumpTables() {
- if (!jump_tables_.empty()) {
- // Jump tables require 4 byte alignment. (We don't support byte and half-word jump tables.)
- uint32_t code_size = buffer_.Size();
- DCHECK_ALIGNED(code_size, 2);
- if ((code_size & 2u) != 0u) {
- Emit16(0);
- }
- for (JumpTable& table : jump_tables_) {
- // Bulk ensure capacity, as this may be large.
- size_t orig_size = buffer_.Size();
- size_t required_capacity = orig_size + table.GetSize();
- if (required_capacity > buffer_.Capacity()) {
- buffer_.ExtendCapacity(required_capacity);
- }
-#ifndef NDEBUG
- buffer_.has_ensured_capacity_ = true;
-#endif
-
- DCHECK_EQ(static_cast<size_t>(table.GetLabel()->Position()), buffer_.Size());
- int32_t anchor_position = table.GetAnchorLabel()->Position() + 4;
-
- for (Label* target : table.GetData()) {
- // Ensure that the label was tracked, so that it will have the right position.
- DCHECK(std::find(tracked_labels_.begin(), tracked_labels_.end(), target) !=
- tracked_labels_.end());
-
- int32_t offset = target->Position() - anchor_position;
- buffer_.Emit<int32_t>(offset);
- }
-
-#ifndef NDEBUG
- buffer_.has_ensured_capacity_ = false;
-#endif
- size_t new_size = buffer_.Size();
- DCHECK_LE(new_size - orig_size, table.GetSize());
- }
- }
-}
-
-void Thumb2Assembler::PatchCFI() {
- if (cfi().NumberOfDelayedAdvancePCs() == 0u) {
- return;
- }
-
- typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
- const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
- const std::vector<uint8_t>& old_stream = data.first;
- const std::vector<DelayedAdvancePC>& advances = data.second;
-
- // Refill our data buffer with patched opcodes.
- cfi().ReserveCFIStream(old_stream.size() + advances.size() + 16);
- size_t stream_pos = 0;
- for (const DelayedAdvancePC& advance : advances) {
- DCHECK_GE(advance.stream_pos, stream_pos);
- // Copy old data up to the point where advance was issued.
- cfi().AppendRawData(old_stream, stream_pos, advance.stream_pos);
- stream_pos = advance.stream_pos;
- // Insert the advance command with its final offset.
- size_t final_pc = GetAdjustedPosition(advance.pc);
- cfi().AdvancePC(final_pc);
- }
- // Copy the final segment if any.
- cfi().AppendRawData(old_stream, stream_pos, old_stream.size());
-}
-
-inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) {
- DCHECK_ALIGNED(offset, 2);
- int16_t encoding = static_cast<int16_t>(B15 | B14);
- if (cond != AL) {
- DCHECK(IsInt<9>(offset));
- encoding |= B12 | (static_cast<int32_t>(cond) << 8) | ((offset >> 1) & 0xff);
- } else {
- DCHECK(IsInt<12>(offset));
- encoding |= B13 | ((offset >> 1) & 0x7ff);
- }
- return encoding;
-}
-
-inline int32_t Thumb2Assembler::BEncoding32(int32_t offset, Condition cond) {
- DCHECK_ALIGNED(offset, 2);
- int32_t s = (offset >> 31) & 1; // Sign bit.
- int32_t encoding = B31 | B30 | B29 | B28 | B15 |
- (s << 26) | // Sign bit goes to bit 26.
- ((offset >> 1) & 0x7ff); // imm11 goes to bits 0-10.
- if (cond != AL) {
- DCHECK(IsInt<21>(offset));
- // Encode cond, move imm6 from bits 12-17 to bits 16-21 and move J1 and J2.
- encoding |= (static_cast<int32_t>(cond) << 22) | ((offset & 0x3f000) << (16 - 12)) |
- ((offset & (1 << 19)) >> (19 - 13)) | // Extract J1 from bit 19 to bit 13.
- ((offset & (1 << 18)) >> (18 - 11)); // Extract J2 from bit 18 to bit 11.
- } else {
- DCHECK(IsInt<25>(offset));
- int32_t j1 = ((offset >> 23) ^ s ^ 1) & 1; // Calculate J1 from I1 extracted from bit 23.
- int32_t j2 = ((offset >> 22) ^ s ^ 1) & 1; // Calculate J2 from I2 extracted from bit 22.
- // Move imm10 from bits 12-21 to bits 16-25 and add J1 and J2.
- encoding |= B12 | ((offset & 0x3ff000) << (16 - 12)) |
- (j1 << 13) | (j2 << 11);
- }
- return encoding;
-}
-
-inline int16_t Thumb2Assembler::CbxzEncoding16(Register rn, int32_t offset, Condition cond) {
- DCHECK(!IsHighRegister(rn));
- DCHECK_ALIGNED(offset, 2);
- DCHECK(IsUint<7>(offset));
- DCHECK(cond == EQ || cond == NE);
- return B15 | B13 | B12 | B8 | (cond == NE ? B11 : 0) | static_cast<int32_t>(rn) |
- ((offset & 0x3e) << (3 - 1)) | // Move imm5 from bits 1-5 to bits 3-7.
- ((offset & 0x40) << (9 - 6)); // Move i from bit 6 to bit 9.
-}
-
-inline int16_t Thumb2Assembler::CmpRnImm8Encoding16(Register rn, int32_t value) {
- DCHECK(!IsHighRegister(rn));
- DCHECK(IsUint<8>(value));
- return B13 | B11 | (rn << 8) | value;
-}
-
-inline int16_t Thumb2Assembler::AddRdnRmEncoding16(Register rdn, Register rm) {
- // The high bit of rdn is moved across the 4-bit rm field.
- return B14 | B10 | (static_cast<int32_t>(rm) << 3) |
- (static_cast<int32_t>(rdn) & 7) | ((static_cast<int32_t>(rdn) & 8) << 4);
-}
-
-inline int32_t Thumb2Assembler::MovwEncoding32(Register rd, int32_t value) {
- DCHECK(IsUint<16>(value));
- return B31 | B30 | B29 | B28 | B25 | B22 |
- (static_cast<int32_t>(rd) << 8) |
- ((value & 0xf000) << (16 - 12)) | // Move imm4 from bits 12-15 to bits 16-19.
- ((value & 0x0800) << (26 - 11)) | // Move i from bit 11 to bit 26.
- ((value & 0x0700) << (12 - 8)) | // Move imm3 from bits 8-10 to bits 12-14.
- (value & 0xff); // Keep imm8 in bits 0-7.
-}
-
-inline int32_t Thumb2Assembler::MovtEncoding32(Register rd, int32_t value) {
- DCHECK_EQ(value & 0xffff, 0);
- int32_t movw_encoding = MovwEncoding32(rd, (value >> 16) & 0xffff);
- return movw_encoding | B25 | B23;
-}
-
-inline int32_t Thumb2Assembler::MovModImmEncoding32(Register rd, int32_t value) {
- uint32_t mod_imm = ModifiedImmediate(value);
- DCHECK_NE(mod_imm, kInvalidModifiedImmediate);
- return B31 | B30 | B29 | B28 | B22 | B19 | B18 | B17 | B16 |
- (static_cast<int32_t>(rd) << 8) | static_cast<int32_t>(mod_imm);
-}
-
-inline int16_t Thumb2Assembler::LdrLitEncoding16(Register rt, int32_t offset) {
- DCHECK(!IsHighRegister(rt));
- DCHECK_ALIGNED(offset, 4);
- DCHECK(IsUint<10>(offset));
- return B14 | B11 | (static_cast<int32_t>(rt) << 8) | (offset >> 2);
-}
-
-inline int32_t Thumb2Assembler::LdrLitEncoding32(Register rt, int32_t offset) {
- // NOTE: We don't support negative offset, i.e. U=0 (B23).
- return LdrRtRnImm12Encoding(rt, PC, offset);
-}
-
-inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset) {
- DCHECK_ALIGNED(offset, 4);
- CHECK(IsUint<10>(offset));
- return B31 | B30 | B29 | B27 |
- B24 /* P = 1 */ | B23 /* U = 1 */ | B22 | 0 /* W = 0 */ | B20 |
- (static_cast<int32_t>(rn) << 16) | (static_cast<int32_t>(rt) << 12) |
- (static_cast<int32_t>(rt2) << 8) | (offset >> 2);
-}
-
-inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32_t offset) {
- DCHECK_ALIGNED(offset, 4);
- CHECK(IsUint<10>(offset));
- return B31 | B30 | B29 | B27 | B26 | B24 |
- B23 /* U = 1 */ | B20 | B11 | B9 |
- (static_cast<int32_t>(rn) << 16) |
- ((static_cast<int32_t>(sd) & 0x01) << (22 - 0)) | // Move D from bit 0 to bit 22.
- ((static_cast<int32_t>(sd) & 0x1e) << (12 - 1)) | // Move Vd from bits 1-4 to bits 12-15.
- (offset >> 2);
-}
-
-inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32_t offset) {
- DCHECK_ALIGNED(offset, 4);
- CHECK(IsUint<10>(offset));
- return B31 | B30 | B29 | B27 | B26 | B24 |
- B23 /* U = 1 */ | B20 | B11 | B9 | B8 |
- (rn << 16) |
- ((static_cast<int32_t>(dd) & 0x10) << (22 - 4)) | // Move D from bit 4 to bit 22.
- ((static_cast<int32_t>(dd) & 0x0f) << (12 - 0)) | // Move Vd from bits 0-3 to bits 12-15.
- (offset >> 2);
-}
-
-inline int16_t Thumb2Assembler::LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset) {
- DCHECK(!IsHighRegister(rt));
- DCHECK(!IsHighRegister(rn));
- DCHECK_ALIGNED(offset, 4);
- DCHECK(IsUint<7>(offset));
- return B14 | B13 | B11 |
- (static_cast<int32_t>(rn) << 3) | static_cast<int32_t>(rt) |
- (offset << (6 - 2)); // Move imm5 from bits 2-6 to bits 6-10.
-}
-
-int32_t Thumb2Assembler::Fixup::LoadWideOrFpEncoding(Register rbase, int32_t offset) const {
- switch (type_) {
- case kLoadLiteralWide:
- return LdrdEncoding32(rn_, rt2_, rbase, offset);
- case kLoadFPLiteralSingle:
- return VldrsEncoding32(sd_, rbase, offset);
- case kLoadFPLiteralDouble:
- return VldrdEncoding32(dd_, rbase, offset);
- default:
- LOG(FATAL) << "Unexpected type: " << static_cast<int>(type_);
- UNREACHABLE();
- }
-}
-
-inline int32_t Thumb2Assembler::LdrRtRnImm12Encoding(Register rt, Register rn, int32_t offset) {
- DCHECK(IsUint<12>(offset));
- return B31 | B30 | B29 | B28 | B27 | B23 | B22 | B20 | (rn << 16) | (rt << 12) | offset;
-}
-
-inline int16_t Thumb2Assembler::AdrEncoding16(Register rd, int32_t offset) {
- DCHECK(IsUint<10>(offset));
- DCHECK(IsAligned<4>(offset));
- DCHECK(!IsHighRegister(rd));
- return B15 | B13 | (rd << 8) | (offset >> 2);
-}
-
-inline int32_t Thumb2Assembler::AdrEncoding32(Register rd, int32_t offset) {
- DCHECK(IsUint<12>(offset));
- // Bit 26: offset[11]
- // Bits 14-12: offset[10-8]
- // Bits 7-0: offset[7-0]
- int32_t immediate_mask =
- ((offset & (1 << 11)) << (26 - 11)) |
- ((offset & (7 << 8)) << (12 - 8)) |
- (offset & 0xFF);
- return B31 | B30 | B29 | B28 | B25 | B19 | B18 | B17 | B16 | (rd << 8) | immediate_mask;
-}
-
-void Thumb2Assembler::FinalizeCode() {
- ArmAssembler::FinalizeCode();
- uint32_t size_after_literals = BindLiterals();
- BindJumpTables(size_after_literals);
- uint32_t adjusted_code_size = AdjustFixups();
- EmitFixups(adjusted_code_size);
- EmitLiterals();
- FinalizeTrackedLabels();
- EmitJumpTables();
- PatchCFI();
-}
-
-bool Thumb2Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
-}
-
-bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
- Register rn ATTRIBUTE_UNUSED,
- Opcode opcode,
- uint32_t immediate,
- SetCc set_cc,
- ShifterOperand* shifter_op) {
- shifter_op->type_ = ShifterOperand::kImmediate;
- shifter_op->immed_ = immediate;
- shifter_op->is_shift_ = false;
- shifter_op->is_rotate_ = false;
- switch (opcode) {
- case ADD:
- case SUB:
- // Less than (or equal to) 12 bits can be done if we don't need to set condition codes.
- if (immediate < (1 << 12) && set_cc != kCcSet) {
- return true;
- }
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
-
- case MOV:
- // TODO: Support less than or equal to 12bits.
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
-
- case MVN:
- default:
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
- }
-}
-
-void Thumb2Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, AND, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::eor(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, EOR, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::sub(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, SUB, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::rsb(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, RSB, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::add(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, ADD, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::adc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, ADC, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::sbc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, SBC, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::rsc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, RSC, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::tst(Register rn, const ShifterOperand& so, Condition cond) {
- CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker.
- EmitDataProcessing(cond, TST, kCcSet, rn, R0, so);
-}
-
-
-void Thumb2Assembler::teq(Register rn, const ShifterOperand& so, Condition cond) {
- CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker.
- EmitDataProcessing(cond, TEQ, kCcSet, rn, R0, so);
-}
-
-
-void Thumb2Assembler::cmp(Register rn, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, CMP, kCcSet, rn, R0, so);
-}
-
-
-void Thumb2Assembler::cmn(Register rn, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, CMN, kCcSet, rn, R0, so);
-}
-
-
-void Thumb2Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, ORR, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::orn(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, ORN, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::mov(Register rd, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, MOV, set_cc, R0, rd, so);
-}
-
-
-void Thumb2Assembler::bic(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, BIC, set_cc, rn, rd, so);
-}
-
-
-void Thumb2Assembler::mvn(Register rd, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitDataProcessing(cond, MVN, set_cc, R0, rd, so);
-}
-
-
-void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
- CheckCondition(cond);
-
- if (rd == rm && !IsHighRegister(rd) && !IsHighRegister(rn) && !force_32bit_) {
- // 16 bit.
- int16_t encoding = B14 | B9 | B8 | B6 |
- rn << 3 | rd;
- Emit16(encoding);
- } else {
- // 32 bit.
- uint32_t op1 = 0U /* 0b000 */;
- uint32_t op2 = 0U /* 0b00 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
- op1 << 20 |
- B15 | B14 | B13 | B12 |
- op2 << 4 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rm);
-
- Emit32(encoding);
- }
-}
-
-
-void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
- Condition cond) {
- CheckCondition(cond);
-
- uint32_t op1 = 0U /* 0b000 */;
- uint32_t op2 = 0U /* 0b00 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
- op1 << 20 |
- op2 << 4 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(ra) << 12 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rm);
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
- Condition cond) {
- CheckCondition(cond);
-
- uint32_t op1 = 0U /* 0b000 */;
- uint32_t op2 = 1U /* 0b01 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
- op1 << 20 |
- op2 << 4 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(ra) << 12 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rm);
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::smull(Register rd_lo, Register rd_hi, Register rn,
- Register rm, Condition cond) {
- CheckCondition(cond);
-
- uint32_t op1 = 0U /* 0b000 */;
- uint32_t op2 = 0U /* 0b0000 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
- op1 << 20 |
- op2 << 4 |
- static_cast<uint32_t>(rd_lo) << 12 |
- static_cast<uint32_t>(rd_hi) << 8 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rm);
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
- Register rm, Condition cond) {
- CheckCondition(cond);
-
- uint32_t op1 = 2U /* 0b010 */;
- uint32_t op2 = 0U /* 0b0000 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
- op1 << 20 |
- op2 << 4 |
- static_cast<uint32_t>(rd_lo) << 12 |
- static_cast<uint32_t>(rd_hi) << 8 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rm);
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
- CheckCondition(cond);
-
- uint32_t op1 = 1U /* 0b001 */;
- uint32_t op2 = 15U /* 0b1111 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B20 |
- op1 << 20 |
- op2 << 4 |
- 0xf << 12 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rm);
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
- CheckCondition(cond);
-
- uint32_t op1 = 1U /* 0b001 */;
- uint32_t op2 = 15U /* 0b1111 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B21 | B20 |
- op1 << 20 |
- op2 << 4 |
- 0xf << 12 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rm);
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
- CheckCondition(cond);
- CHECK_LE(lsb, 31U);
- CHECK(1U <= width && width <= 32U) << width;
- uint32_t widthminus1 = width - 1;
- uint32_t imm2 = lsb & (B1 | B0); // Bits 0-1 of `lsb`.
- uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2; // Bits 2-4 of `lsb`.
-
- uint32_t op = 20U /* 0b10100 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B25 |
- op << 20 |
- static_cast<uint32_t>(rn) << 16 |
- imm3 << 12 |
- static_cast<uint32_t>(rd) << 8 |
- imm2 << 6 |
- widthminus1;
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
- CheckCondition(cond);
- CHECK_LE(lsb, 31U);
- CHECK(1U <= width && width <= 32U) << width;
- uint32_t widthminus1 = width - 1;
- uint32_t imm2 = lsb & (B1 | B0); // Bits 0-1 of `lsb`.
- uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2; // Bits 2-4 of `lsb`.
-
- uint32_t op = 28U /* 0b11100 */;
- int32_t encoding = B31 | B30 | B29 | B28 | B25 |
- op << 20 |
- static_cast<uint32_t>(rn) << 16 |
- imm3 << 12 |
- static_cast<uint32_t>(rd) << 8 |
- imm2 << 6 |
- widthminus1;
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::ldr(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, true, false, false, false, rd, ad);
-}
-
-
-void Thumb2Assembler::str(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, false, false, false, false, rd, ad);
-}
-
-
-void Thumb2Assembler::ldrb(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, true, true, false, false, rd, ad);
-}
-
-
-void Thumb2Assembler::strb(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, false, true, false, false, rd, ad);
-}
-
-
-void Thumb2Assembler::ldrh(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, true, false, true, false, rd, ad);
-}
-
-
-void Thumb2Assembler::strh(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, false, false, true, false, rd, ad);
-}
-
-
-void Thumb2Assembler::ldrsb(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, true, true, false, true, rd, ad);
-}
-
-
-void Thumb2Assembler::ldrsh(Register rd, const Address& ad, Condition cond) {
- EmitLoadStore(cond, true, false, true, true, rd, ad);
-}
-
-
-void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
- ldrd(rd, Register(rd + 1), ad, cond);
-}
-
-
-void Thumb2Assembler::ldrd(Register rd, Register rd2, const Address& ad, Condition cond) {
- CheckCondition(cond);
- // Encoding T1.
- // This is different from other loads. The encoding is like ARM.
- int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
- static_cast<int32_t>(rd) << 12 |
- static_cast<int32_t>(rd2) << 8 |
- ad.encodingThumbLdrdStrd();
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::strd(Register rd, const Address& ad, Condition cond) {
- strd(rd, Register(rd + 1), ad, cond);
-}
-
-
-void Thumb2Assembler::strd(Register rd, Register rd2, const Address& ad, Condition cond) {
- CheckCondition(cond);
- // Encoding T1.
- // This is different from other stores. The encoding is like ARM.
- int32_t encoding = B31 | B30 | B29 | B27 | B22 |
- static_cast<int32_t>(rd) << 12 |
- static_cast<int32_t>(rd2) << 8 |
- ad.encodingThumbLdrdStrd();
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::ldm(BlockAddressMode am,
- Register base,
- RegList regs,
- Condition cond) {
- CHECK_NE(regs, 0u); // Do not use ldm if there's nothing to load.
- if (IsPowerOfTwo(regs)) {
- // Thumb doesn't support one reg in the list.
- // Find the register number.
- int reg = CTZ(static_cast<uint32_t>(regs));
- CHECK_LT(reg, 16);
- CHECK(am == DB_W); // Only writeback is supported.
- ldr(static_cast<Register>(reg), Address(base, kRegisterSize, Address::PostIndex), cond);
- } else {
- EmitMultiMemOp(cond, am, true, base, regs);
- }
-}
-
-
-void Thumb2Assembler::stm(BlockAddressMode am,
- Register base,
- RegList regs,
- Condition cond) {
- CHECK_NE(regs, 0u); // Do not use stm if there's nothing to store.
- if (IsPowerOfTwo(regs)) {
- // Thumb doesn't support one reg in the list.
- // Find the register number.
- int reg = CTZ(static_cast<uint32_t>(regs));
- CHECK_LT(reg, 16);
- CHECK(am == IA || am == IA_W);
- Address::Mode strmode = am == IA ? Address::PreIndex : Address::Offset;
- str(static_cast<Register>(reg), Address(base, -kRegisterSize, strmode), cond);
- } else {
- EmitMultiMemOp(cond, am, false, base, regs);
- }
-}
-
-
-bool Thumb2Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
- uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
- if (((imm32 & ((1 << 19) - 1)) == 0) &&
- ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
- (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
- uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
- ((imm32 >> 19) & ((1 << 6) - 1));
- EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf),
- sd, S0, S0);
- return true;
- }
- return false;
-}
-
-
-bool Thumb2Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
- uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
- if (((imm64 & ((1LL << 48) - 1)) == 0) &&
- ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
- (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
- uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
- ((imm64 >> 48) & ((1 << 6) - 1));
- EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf),
- dd, D0, D0);
- return true;
- }
- return false;
-}
-
-
-void Thumb2Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
-}
-
-
-void Thumb2Assembler::vadds(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B21 | B20, sd, sn, sm);
-}
-
-
-void Thumb2Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B21 | B20, dd, dn, dm);
-}
-
-
-void Thumb2Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
-}
-
-
-void Thumb2Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
-}
-
-
-void Thumb2Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B21, sd, sn, sm);
-}
-
-
-void Thumb2Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B21, dd, dn, dm);
-}
-
-
-void Thumb2Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, 0, sd, sn, sm);
-}
-
-
-void Thumb2Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, 0, dd, dn, dm);
-}
-
-
-void Thumb2Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B6, sd, sn, sm);
-}
-
-
-void Thumb2Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B6, dd, dn, dm);
-}
-
-
-void Thumb2Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B23, sd, sn, sm);
-}
-
-
-void Thumb2Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B23, dd, dn, dm);
-}
-
-
-void Thumb2Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
-}
-
-
-void Thumb2Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
-}
-
-
-void Thumb2Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
-}
-
-void Thumb2Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
-}
-
-
-void Thumb2Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
- EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
-}
-
-
-void Thumb2Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
- EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
-}
-
-
-void Thumb2Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
- EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
-}
-
-
-void Thumb2Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
- EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
-}
-
-
-void Thumb2Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
- EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
-}
-
-
-void Thumb2Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
- EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
-}
-
-
-void Thumb2Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
-}
-
-
-void Thumb2Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
-}
-
-
-void Thumb2Assembler::vcmpsz(SRegister sd, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
-}
-
-
-void Thumb2Assembler::vcmpdz(DRegister dd, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
-}
-
-void Thumb2Assembler::b(Label* label, Condition cond) {
- DCHECK_EQ(next_condition_, AL);
- EmitBranch(cond, label, false, false);
-}
-
-
-void Thumb2Assembler::bl(Label* label, Condition cond) {
- CheckCondition(cond);
- EmitBranch(cond, label, true, false);
-}
-
-
-void Thumb2Assembler::blx(Label* label) {
- EmitBranch(AL, label, true, true);
-}
-
-
-void Thumb2Assembler::MarkExceptionHandler(Label* label) {
- EmitDataProcessing(AL, TST, kCcSet, PC, R0, ShifterOperand(0));
- Label l;
- b(&l);
- EmitBranch(AL, label, false, false);
- Bind(&l);
-}
-
-
-void Thumb2Assembler::Emit32(int32_t value) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<int16_t>(value >> 16);
- buffer_.Emit<int16_t>(value & 0xffff);
-}
-
-
-void Thumb2Assembler::Emit16(int16_t value) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<int16_t>(value);
-}
-
-
-bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so) {
- if (force_32bit_) {
- return true;
- }
-
- // Check special case for SP relative ADD and SUB immediate.
- if ((opcode == ADD || opcode == SUB) && rn == SP && so.IsImmediate() && set_cc != kCcSet) {
- // If the immediate is in range, use 16 bit.
- if (rd == SP) {
- if (so.GetImmediate() < (1 << 9)) { // 9 bit immediate.
- return false;
- }
- } else if (!IsHighRegister(rd) && opcode == ADD) {
- if (so.GetImmediate() < (1 << 10)) { // 10 bit immediate.
- return false;
- }
- }
- }
-
- bool can_contain_high_register =
- (opcode == CMP) ||
- (opcode == MOV && set_cc != kCcSet) ||
- ((opcode == ADD) && (rn == rd) && set_cc != kCcSet);
-
- if (IsHighRegister(rd) || IsHighRegister(rn)) {
- if (!can_contain_high_register) {
- return true;
- }
-
- // There are high register instructions available for this opcode.
- // However, there is no actual shift available, neither for ADD nor for MOV (ASR/LSR/LSL/ROR).
- if (so.IsShift() && (so.GetShift() == RRX || so.GetImmediate() != 0u)) {
- return true;
- }
-
- // The ADD and MOV instructions that work with high registers don't have 16-bit
- // immediate variants.
- if (so.IsImmediate()) {
- return true;
- }
- }
-
- if (so.IsRegister() && IsHighRegister(so.GetRegister()) && !can_contain_high_register) {
- return true;
- }
-
- bool rn_is_valid = true;
-
- // Check for single operand instructions and ADD/SUB.
- switch (opcode) {
- case CMP:
- case MOV:
- case TST:
- case MVN:
- rn_is_valid = false; // There is no Rn for these instructions.
- break;
- case TEQ:
- case ORN:
- return true;
- case ADD:
- case SUB:
- break;
- default:
- if (so.IsRegister() && rd != rn) {
- return true;
- }
- }
-
- if (so.IsImmediate()) {
- if (opcode == RSB) {
- DCHECK(rn_is_valid);
- if (so.GetImmediate() != 0u) {
- return true;
- }
- } else if (rn_is_valid && rn != rd) {
- // The only thumb1 instructions with a register and an immediate are ADD and SUB
- // with a 3-bit immediate, and RSB with zero immediate.
- if (opcode == ADD || opcode == SUB) {
- if ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet) {
- return true; // Cannot match "setflags".
- }
- if (!IsUint<3>(so.GetImmediate()) && !IsUint<3>(-so.GetImmediate())) {
- return true;
- }
- } else {
- return true;
- }
- } else {
- // ADD, SUB, CMP and MOV may be thumb1 only if the immediate is 8 bits.
- if (!(opcode == ADD || opcode == SUB || opcode == MOV || opcode == CMP)) {
- return true;
- } else if (opcode != CMP && ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
- return true; // Cannot match "setflags" for ADD, SUB or MOV.
- } else {
- // For ADD and SUB, also allow a negative 8-bit immediate as we will emit the opposite opcode.
- if (!IsUint<8>(so.GetImmediate()) &&
- (opcode == MOV || opcode == CMP || !IsUint<8>(-so.GetImmediate()))) {
- return true;
- }
- }
- }
- } else {
- DCHECK(so.IsRegister());
- if (so.IsShift()) {
- // Shift operand - check if it is a MOV convertible to a 16-bit shift instruction.
- if (opcode != MOV) {
- return true;
- }
- // Check for MOV with an ROR/RRX. There is no 16-bit ROR immediate and no 16-bit RRX.
- if (so.GetShift() == ROR || so.GetShift() == RRX) {
- return true;
- }
- // 16-bit shifts set condition codes if and only if outside IT block,
- // i.e. if and only if cond == AL.
- if ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet) {
- return true;
- }
- } else {
- // Register operand without shift.
- switch (opcode) {
- case ADD:
- // The 16-bit ADD that cannot contain high registers can set condition codes
- // if and only if outside IT block, i.e. if and only if cond == AL.
- if (!can_contain_high_register &&
- ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
- return true;
- }
- break;
- case AND:
- case BIC:
- case EOR:
- case ORR:
- case MVN:
- case ADC:
- case SUB:
- case SBC:
- // These 16-bit opcodes set condition codes if and only if outside IT block,
- // i.e. if and only if cond == AL.
- if ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet) {
- return true;
- }
- break;
- case RSB:
- case RSC:
- // No 16-bit RSB/RSC Rd, Rm, Rn. It would be equivalent to SUB/SBC Rd, Rn, Rm.
- return true;
- case CMP:
- default:
- break;
- }
- }
- }
-
- // The instruction can be encoded in 16 bits.
- return false;
-}
-
-
-void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so) {
- uint8_t thumb_opcode = 255U /* 0b11111111 */;
- switch (opcode) {
- case AND: thumb_opcode = 0U /* 0b0000 */; break;
- case EOR: thumb_opcode = 4U /* 0b0100 */; break;
- case SUB: thumb_opcode = 13U /* 0b1101 */; break;
- case RSB: thumb_opcode = 14U /* 0b1110 */; break;
- case ADD: thumb_opcode = 8U /* 0b1000 */; break;
- case ADC: thumb_opcode = 10U /* 0b1010 */; break;
- case SBC: thumb_opcode = 11U /* 0b1011 */; break;
- case RSC: break;
- case TST: thumb_opcode = 0U /* 0b0000 */; DCHECK(set_cc == kCcSet); rd = PC; break;
- case TEQ: thumb_opcode = 4U /* 0b0100 */; DCHECK(set_cc == kCcSet); rd = PC; break;
- case CMP: thumb_opcode = 13U /* 0b1101 */; DCHECK(set_cc == kCcSet); rd = PC; break;
- case CMN: thumb_opcode = 8U /* 0b1000 */; DCHECK(set_cc == kCcSet); rd = PC; break;
- case ORR: thumb_opcode = 2U /* 0b0010 */; break;
- case MOV: thumb_opcode = 2U /* 0b0010 */; rn = PC; break;
- case BIC: thumb_opcode = 1U /* 0b0001 */; break;
- case MVN: thumb_opcode = 3U /* 0b0011 */; rn = PC; break;
- case ORN: thumb_opcode = 3U /* 0b0011 */; break;
- default:
- break;
- }
-
- if (thumb_opcode == 255U /* 0b11111111 */) {
- LOG(FATAL) << "Invalid thumb2 opcode " << opcode;
- UNREACHABLE();
- }
-
- int32_t encoding = 0;
- if (so.IsImmediate()) {
- // Check special cases.
- if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12)) &&
- /* Prefer T3 encoding to T4. */ !ShifterOperandCanAlwaysHold(so.GetImmediate())) {
- if (set_cc != kCcSet) {
- if (opcode == SUB) {
- thumb_opcode = 5U;
- } else if (opcode == ADD) {
- thumb_opcode = 0U;
- }
- }
- uint32_t imm = so.GetImmediate();
-
- uint32_t i = (imm >> 11) & 1;
- uint32_t imm3 = (imm >> 8) & 7U /* 0b111 */;
- uint32_t imm8 = imm & 0xff;
-
- encoding = B31 | B30 | B29 | B28 |
- (set_cc == kCcSet ? B20 : B25) |
- thumb_opcode << 21 |
- rn << 16 |
- rd << 8 |
- i << 26 |
- imm3 << 12 |
- imm8;
- } else {
- // Modified immediate.
- uint32_t imm = ModifiedImmediate(so.encodingThumb());
- if (imm == kInvalidModifiedImmediate) {
- LOG(FATAL) << "Immediate value cannot fit in thumb2 modified immediate";
- UNREACHABLE();
- }
- encoding = B31 | B30 | B29 | B28 |
- thumb_opcode << 21 |
- (set_cc == kCcSet ? B20 : 0) |
- rn << 16 |
- rd << 8 |
- imm;
- }
- } else if (so.IsRegister()) {
- // Register (possibly shifted)
- encoding = B31 | B30 | B29 | B27 | B25 |
- thumb_opcode << 21 |
- (set_cc == kCcSet ? B20 : 0) |
- rn << 16 |
- rd << 8 |
- so.encodingThumb();
- }
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so) {
- if (opcode == ADD || opcode == SUB) {
- Emit16BitAddSub(cond, opcode, set_cc, rn, rd, so);
- return;
- }
- uint8_t thumb_opcode = 255U /* 0b11111111 */;
- // Thumb1.
- uint8_t dp_opcode = 1U /* 0b01 */;
- uint8_t opcode_shift = 6;
- uint8_t rd_shift = 0;
- uint8_t rn_shift = 3;
- uint8_t immediate_shift = 0;
- bool use_immediate = false;
- uint8_t immediate = 0;
-
- if (opcode == MOV && so.IsRegister() && so.IsShift()) {
- // Convert shifted mov operand2 into 16 bit opcodes.
- dp_opcode = 0;
- opcode_shift = 11;
-
- use_immediate = true;
- immediate = so.GetImmediate();
- immediate_shift = 6;
-
- rn = so.GetRegister();
-
- switch (so.GetShift()) {
- case LSL:
- DCHECK_LE(immediate, 31u);
- thumb_opcode = 0U /* 0b00 */;
- break;
- case LSR:
- DCHECK(1 <= immediate && immediate <= 32);
- immediate &= 31; // 32 is encoded as 0.
- thumb_opcode = 1U /* 0b01 */;
- break;
- case ASR:
- DCHECK(1 <= immediate && immediate <= 32);
- immediate &= 31; // 32 is encoded as 0.
- thumb_opcode = 2U /* 0b10 */;
- break;
- case ROR: // No 16-bit ROR immediate.
- case RRX: // No 16-bit RRX.
- default:
- LOG(FATAL) << "Unexpected shift: " << so.GetShift();
- UNREACHABLE();
- }
- } else {
- if (so.IsImmediate()) {
- use_immediate = true;
- immediate = so.GetImmediate();
- } else {
- CHECK(!(so.IsRegister() && so.IsShift() && so.GetSecondRegister() != kNoRegister))
- << "No register-shifted register instruction available in thumb";
- // Adjust rn and rd: only two registers will be emitted.
- switch (opcode) {
- case AND:
- case ORR:
- case EOR:
- case RSB:
- case ADC:
- case SBC:
- case BIC: {
- // Sets condition codes if and only if outside IT block,
- // check that it complies with set_cc.
- DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
- if (rn == rd) {
- rn = so.GetRegister();
- } else {
- CHECK_EQ(rd, so.GetRegister());
- }
- break;
- }
- case CMP:
- case CMN: {
- CHECK_EQ(rd, 0);
- rd = rn;
- rn = so.GetRegister();
- break;
- }
- case MVN: {
- // Sets condition codes if and only if outside IT block,
- // check that it complies with set_cc.
- DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
- CHECK_EQ(rn, 0);
- rn = so.GetRegister();
- break;
- }
- case TST:
- case TEQ: {
- DCHECK(set_cc == kCcSet);
- CHECK_EQ(rn, 0);
- rn = so.GetRegister();
- break;
- }
- default:
- break;
- }
- }
-
- switch (opcode) {
- case AND: thumb_opcode = 0U /* 0b0000 */; break;
- case ORR: thumb_opcode = 12U /* 0b1100 */; break;
- case EOR: thumb_opcode = 1U /* 0b0001 */; break;
- case RSB: thumb_opcode = 9U /* 0b1001 */; break;
- case ADC: thumb_opcode = 5U /* 0b0101 */; break;
- case SBC: thumb_opcode = 6U /* 0b0110 */; break;
- case BIC: thumb_opcode = 14U /* 0b1110 */; break;
- case TST: thumb_opcode = 8U /* 0b1000 */; CHECK(!use_immediate); break;
- case MVN: thumb_opcode = 15U /* 0b1111 */; CHECK(!use_immediate); break;
- case CMP: {
- DCHECK(set_cc == kCcSet);
- if (use_immediate) {
- // T2 encoding.
- dp_opcode = 0;
- opcode_shift = 11;
- thumb_opcode = 5U /* 0b101 */;
- rd_shift = 8;
- rn_shift = 8;
- } else if (IsHighRegister(rd) || IsHighRegister(rn)) {
- // Special cmp for high registers.
- dp_opcode = 1U /* 0b01 */;
- opcode_shift = 7;
- // Put the top bit of rd into the bottom bit of the opcode.
- thumb_opcode = 10U /* 0b0001010 */ | static_cast<uint32_t>(rd) >> 3;
- rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
- } else {
- thumb_opcode = 10U /* 0b1010 */;
- }
-
- break;
- }
- case CMN: {
- CHECK(!use_immediate);
- thumb_opcode = 11U /* 0b1011 */;
- break;
- }
- case MOV:
- dp_opcode = 0;
- if (use_immediate) {
- // T2 encoding.
- opcode_shift = 11;
- thumb_opcode = 4U /* 0b100 */;
- rd_shift = 8;
- rn_shift = 8;
- } else {
- rn = so.GetRegister();
- if (set_cc != kCcSet) {
- // Special mov for high registers.
- dp_opcode = 1U /* 0b01 */;
- opcode_shift = 7;
- // Put the top bit of rd into the bottom bit of the opcode.
- thumb_opcode = 12U /* 0b0001100 */ | static_cast<uint32_t>(rd) >> 3;
- rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
- } else {
- DCHECK(!IsHighRegister(rn));
- DCHECK(!IsHighRegister(rd));
- thumb_opcode = 0;
- }
- }
- break;
-
- case TEQ:
- case RSC:
- default:
- LOG(FATAL) << "Invalid thumb1 opcode " << opcode;
- break;
- }
- }
-
- if (thumb_opcode == 255U /* 0b11111111 */) {
- LOG(FATAL) << "Invalid thumb1 opcode " << opcode;
- UNREACHABLE();
- }
-
- int16_t encoding = dp_opcode << 14 |
- (thumb_opcode << opcode_shift) |
- rd << rd_shift |
- rn << rn_shift |
- (use_immediate ? (immediate << immediate_shift) : 0);
-
- Emit16(encoding);
-}
-
-
-// ADD and SUB are complex enough to warrant their own emitter.
-void Thumb2Assembler::Emit16BitAddSub(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so) {
- uint8_t dp_opcode = 0;
- uint8_t opcode_shift = 6;
- uint8_t rd_shift = 0;
- uint8_t rn_shift = 3;
- uint8_t immediate_shift = 0;
- bool use_immediate = false;
- uint32_t immediate = 0; // Should be at most 10 bits but keep the full immediate for CHECKs.
- uint8_t thumb_opcode;
-
- if (so.IsImmediate()) {
- use_immediate = true;
- immediate = so.GetImmediate();
- if (!IsUint<10>(immediate)) {
- // Flip ADD/SUB.
- opcode = (opcode == ADD) ? SUB : ADD;
- immediate = -immediate;
- DCHECK(IsUint<10>(immediate)); // More stringent checks below.
- }
- }
-
- switch (opcode) {
- case ADD:
- if (so.IsRegister()) {
- Register rm = so.GetRegister();
- if (rn == rd && set_cc != kCcSet) {
- // Can use T2 encoding (allows 4 bit registers)
- dp_opcode = 1U /* 0b01 */;
- opcode_shift = 10;
- thumb_opcode = 1U /* 0b0001 */;
- // Make Rn also contain the top bit of rd.
- rn = static_cast<Register>(static_cast<uint32_t>(rm) |
- (static_cast<uint32_t>(rd) & 8U /* 0b1000 */) << 1);
- rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
- } else {
- // T1.
- DCHECK(!IsHighRegister(rd));
- DCHECK(!IsHighRegister(rn));
- DCHECK(!IsHighRegister(rm));
- // Sets condition codes if and only if outside IT block,
- // check that it complies with set_cc.
- DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
- opcode_shift = 9;
- thumb_opcode = 12U /* 0b01100 */;
- immediate = static_cast<uint32_t>(so.GetRegister());
- use_immediate = true;
- immediate_shift = 6;
- }
- } else {
- // Immediate.
- if (rd == SP && rn == SP) {
- // ADD sp, sp, #imm
- dp_opcode = 2U /* 0b10 */;
- thumb_opcode = 3U /* 0b11 */;
- opcode_shift = 12;
- CHECK(IsUint<9>(immediate));
- CHECK_ALIGNED(immediate, 4);
-
- // Remove rd and rn from the instruction by clearing their fields; the encoding is carried by the immediate.
- rn = R0;
- rd = R0;
- rd_shift = 0;
- rn_shift = 0;
- immediate >>= 2;
- } else if (rd != SP && rn == SP) {
- // ADD rd, SP, #imm
- dp_opcode = 2U /* 0b10 */;
- thumb_opcode = 5U /* 0b101 */;
- opcode_shift = 11;
- CHECK(IsUint<10>(immediate));
- CHECK_ALIGNED(immediate, 4);
-
- // Remove rn from instruction.
- rn = R0;
- rn_shift = 0;
- rd_shift = 8;
- immediate >>= 2;
- } else if (rn != rd) {
- // Must use T1.
- CHECK(IsUint<3>(immediate));
- opcode_shift = 9;
- thumb_opcode = 14U /* 0b01110 */;
- immediate_shift = 6;
- } else {
- // T2 encoding.
- CHECK(IsUint<8>(immediate));
- opcode_shift = 11;
- thumb_opcode = 6U /* 0b110 */;
- rd_shift = 8;
- rn_shift = 8;
- }
- }
- break;
-
- case SUB:
- if (so.IsRegister()) {
- // T1.
- Register rm = so.GetRegister();
- DCHECK(!IsHighRegister(rd));
- DCHECK(!IsHighRegister(rn));
- DCHECK(!IsHighRegister(rm));
- // Sets condition codes if and only if outside IT block,
- // check that it complies with set_cc.
- DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
- opcode_shift = 9;
- thumb_opcode = 13U /* 0b01101 */;
- immediate = static_cast<uint32_t>(rm);
- use_immediate = true;
- immediate_shift = 6;
- } else {
- if (rd == SP && rn == SP) {
- // SUB sp, sp, #imm
- dp_opcode = 2U /* 0b10 */;
- thumb_opcode = 0x61 /* 0b1100001 */;
- opcode_shift = 7;
- CHECK(IsUint<9>(immediate));
- CHECK_ALIGNED(immediate, 4);
-
- // Remove rd and rn from the instruction by clearing their fields; the encoding is carried by the immediate.
- rn = R0;
- rd = R0;
- rd_shift = 0;
- rn_shift = 0;
- immediate >>= 2;
- } else if (rn != rd) {
- // Must use T1.
- CHECK(IsUint<3>(immediate));
- opcode_shift = 9;
- thumb_opcode = 15U /* 0b01111 */;
- immediate_shift = 6;
- } else {
- // T2 encoding.
- CHECK(IsUint<8>(immediate));
- opcode_shift = 11;
- thumb_opcode = 7U /* 0b111 */;
- rd_shift = 8;
- rn_shift = 8;
- }
- }
- break;
- default:
- LOG(FATAL) << "This opcode is not an ADD or SUB: " << opcode;
- UNREACHABLE();
- }
-
- int16_t encoding = dp_opcode << 14 |
- (thumb_opcode << opcode_shift) |
- rd << rd_shift |
- rn << rn_shift |
- (use_immediate ? (immediate << immediate_shift) : 0);
-
- Emit16(encoding);
-}
-
-
-void Thumb2Assembler::EmitDataProcessing(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so) {
- CHECK_NE(rd, kNoRegister);
- CheckCondition(cond);
-
- if (Is32BitDataProcessing(cond, opcode, set_cc, rn, rd, so)) {
- Emit32BitDataProcessing(cond, opcode, set_cc, rn, rd, so);
- } else {
- Emit16BitDataProcessing(cond, opcode, set_cc, rn, rd, so);
- }
-}
-
-void Thumb2Assembler::EmitShift(Register rd,
- Register rm,
- Shift shift,
- uint8_t amount,
- Condition cond,
- SetCc set_cc) {
- CHECK_LT(amount, (1 << 5));
- if ((IsHighRegister(rd) || IsHighRegister(rm) || shift == ROR || shift == RRX) ||
- ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
- uint16_t opcode = 0;
- switch (shift) {
- case LSL: opcode = 0U /* 0b00 */; break;
- case LSR: opcode = 1U /* 0b01 */; break;
- case ASR: opcode = 2U /* 0b10 */; break;
- case ROR: opcode = 3U /* 0b11 */; break;
- case RRX: opcode = 3U /* 0b11 */; amount = 0; break;
- default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
- UNREACHABLE();
- }
- // 32 bit.
- int32_t encoding = B31 | B30 | B29 | B27 | B25 | B22 |
- 0xf << 16 | (set_cc == kCcSet ? B20 : 0);
- uint32_t imm3 = amount >> 2;
- uint32_t imm2 = amount & 3U /* 0b11 */;
- encoding |= imm3 << 12 | imm2 << 6 | static_cast<int16_t>(rm) |
- static_cast<int16_t>(rd) << 8 | opcode << 4;
- Emit32(encoding);
- } else {
- // 16 bit shift
- uint16_t opcode = 0;
- switch (shift) {
- case LSL: opcode = 0U /* 0b00 */; break;
- case LSR: opcode = 1U /* 0b01 */; break;
- case ASR: opcode = 2U /* 0b10 */; break;
- default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
- UNREACHABLE();
- }
- int16_t encoding = opcode << 11 | amount << 6 | static_cast<int16_t>(rm) << 3 |
- static_cast<int16_t>(rd);
- Emit16(encoding);
- }
-}
-
-void Thumb2Assembler::EmitShift(Register rd,
- Register rn,
- Shift shift,
- Register rm,
- Condition cond,
- SetCc set_cc) {
- CHECK_NE(shift, RRX);
- bool must_be_32bit = false;
- if (IsHighRegister(rd) || IsHighRegister(rm) || IsHighRegister(rn) || rd != rn ||
- ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
- must_be_32bit = true;
- }
-
- if (must_be_32bit) {
- uint16_t opcode = 0;
- switch (shift) {
- case LSL: opcode = 0U /* 0b00 */; break;
- case LSR: opcode = 1U /* 0b01 */; break;
- case ASR: opcode = 2U /* 0b10 */; break;
- case ROR: opcode = 3U /* 0b11 */; break;
- default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
- UNREACHABLE();
- }
- // 32 bit.
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 |
- 0xf << 12 | (set_cc == kCcSet ? B20 : 0);
- encoding |= static_cast<int16_t>(rn) << 16 | static_cast<int16_t>(rm) |
- static_cast<int16_t>(rd) << 8 | opcode << 21;
- Emit32(encoding);
- } else {
- uint16_t opcode = 0;
- switch (shift) {
- case LSL: opcode = 2U /* 0b0010 */; break;
- case LSR: opcode = 3U /* 0b0011 */; break;
- case ASR: opcode = 4U /* 0b0100 */; break;
- case ROR: opcode = 7U /* 0b0111 */; break;
- default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
- UNREACHABLE();
- }
- int16_t encoding = B14 | opcode << 6 | static_cast<int16_t>(rm) << 3 |
- static_cast<int16_t>(rd);
- Emit16(encoding);
- }
-}
-
-inline size_t Thumb2Assembler::Fixup::SizeInBytes(Size size) {
- switch (size) {
- case kBranch16Bit:
- return 2u;
- case kBranch32Bit:
- return 4u;
-
- case kCbxz16Bit:
- return 2u;
- case kCbxz32Bit:
- return 4u;
- case kCbxz48Bit:
- return 6u;
-
- case kCodeAddr4KiB:
- return 4u;
-
- case kLiteral1KiB:
- return 2u;
- case kLiteral4KiB:
- return 4u;
- case kLiteral64KiB:
- return 8u;
- case kLiteral1MiB:
- return 10u;
- case kLiteralFar:
- return 14u;
-
- case kLiteralAddr1KiB:
- return 2u;
- case kLiteralAddr4KiB:
- return 4u;
- case kLiteralAddr64KiB:
- return 6u;
- case kLiteralAddrFar:
- return 10u;
-
- case kLongOrFPLiteral1KiB:
- return 4u;
- case kLongOrFPLiteral64KiB:
- return 10u;
- case kLongOrFPLiteralFar:
- return 14u;
- }
- LOG(FATAL) << "Unexpected size: " << static_cast<int>(size);
- UNREACHABLE();
-}
-
-inline uint32_t Thumb2Assembler::Fixup::GetOriginalSizeInBytes() const {
- return SizeInBytes(original_size_);
-}
-
-inline uint32_t Thumb2Assembler::Fixup::GetSizeInBytes() const {
- return SizeInBytes(size_);
-}
-
-inline size_t Thumb2Assembler::Fixup::LiteralPoolPaddingSize(uint32_t current_code_size) {
- // The code size must be a multiple of 2.
- DCHECK_ALIGNED(current_code_size, 2);
- // If it isn't a multiple of 4, we need to add a 2-byte padding before the literal pool.
- return current_code_size & 2;
-}
-
-inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) const {
- static constexpr int32_t int32_min = std::numeric_limits<int32_t>::min();
- static constexpr int32_t int32_max = std::numeric_limits<int32_t>::max();
- DCHECK_LE(target_, static_cast<uint32_t>(int32_max));
- DCHECK_LE(location_, static_cast<uint32_t>(int32_max));
- DCHECK_LE(adjustment_, static_cast<uint32_t>(int32_max));
- int32_t diff = static_cast<int32_t>(target_) - static_cast<int32_t>(location_);
- if (target_ > location_) {
- DCHECK_LE(adjustment_, static_cast<uint32_t>(int32_max - diff));
- diff += static_cast<int32_t>(adjustment_);
- } else {
- DCHECK_LE(int32_min + static_cast<int32_t>(adjustment_), diff);
- diff -= static_cast<int32_t>(adjustment_);
- }
- // The default PC adjustment for Thumb2 is 4 bytes.
- DCHECK_GE(diff, int32_min + 4);
- diff -= 4;
- // Add additional adjustment for instructions preceding the PC usage, padding
- // before the literal pool and rounding down the PC for literal loads.
- switch (GetSize()) {
- case kBranch16Bit:
- case kBranch32Bit:
- break;
-
- case kCbxz16Bit:
- break;
- case kCbxz32Bit:
- case kCbxz48Bit:
- DCHECK_GE(diff, int32_min + 2);
- diff -= 2; // Extra CMP Rn, #0, 16-bit.
- break;
-
- case kCodeAddr4KiB:
- // The ADR instruction rounds down the PC+4 to a multiple of 4, so if the PC
- // isn't a multiple of 4, we need to adjust.
- DCHECK_ALIGNED(diff, 2);
- diff += location_ & 2;
- // Add the Thumb mode bit.
- diff += 1;
- break;
-
- case kLiteral1KiB:
- case kLiteral4KiB:
- case kLongOrFPLiteral1KiB:
- case kLiteralAddr1KiB:
- case kLiteralAddr4KiB:
- DCHECK(diff >= 0 || (GetSize() == kLiteral1KiB && diff == -2));
- diff += LiteralPoolPaddingSize(current_code_size);
- // Load literal instructions round down the PC+4 to a multiple of 4, so if the PC
- // isn't a multiple of 4, we need to adjust. Since we already adjusted for the target
- // being aligned, current PC alignment can be inferred from diff.
- DCHECK_ALIGNED(diff, 2);
- diff = diff + (diff & 2);
- DCHECK_GE(diff, 0);
- break;
- case kLiteral64KiB:
- case kLiteral1MiB:
- case kLongOrFPLiteral64KiB:
- case kLiteralAddr64KiB:
- DCHECK_GE(diff, 4); // The target must be at least 4 bytes after the ADD rX, PC.
- diff -= 4; // One extra 32-bit MOV.
- diff += LiteralPoolPaddingSize(current_code_size);
- break;
- case kLiteralFar:
- case kLongOrFPLiteralFar:
- case kLiteralAddrFar:
- DCHECK_GE(diff, 8); // The target must be at least 4 bytes after the ADD rX, PC.
- diff -= 8; // Extra MOVW+MOVT; both 32-bit.
- diff += LiteralPoolPaddingSize(current_code_size);
- break;
- }
- return diff;
-}
-
-inline size_t Thumb2Assembler::Fixup::IncreaseSize(Size new_size) {
- DCHECK_NE(target_, kUnresolved);
- Size old_size = size_;
- size_ = new_size;
- DCHECK_GT(SizeInBytes(new_size), SizeInBytes(old_size));
- size_t adjustment = SizeInBytes(new_size) - SizeInBytes(old_size);
- if (target_ > location_) {
- adjustment_ += adjustment;
- }
- return adjustment;
-}
-
-bool Thumb2Assembler::Fixup::IsCandidateForEmitEarly() const {
- DCHECK(size_ == original_size_);
- if (target_ == kUnresolved) {
- return false;
- }
- // GetOffset() does not depend on current_code_size for branches, only for literals.
- constexpr uint32_t current_code_size = 0u;
- switch (GetSize()) {
- case kBranch16Bit:
- return IsInt(cond_ != AL ? 9 : 12, GetOffset(current_code_size));
- case kBranch32Bit:
- // We don't support conditional branches beyond +-1MiB
- // or unconditional branches beyond +-16MiB.
- return true;
-
- case kCbxz16Bit:
- return IsUint<7>(GetOffset(current_code_size));
- case kCbxz32Bit:
- return IsInt<9>(GetOffset(current_code_size));
- case kCbxz48Bit:
- // We don't support conditional branches beyond +-1MiB.
- return true;
-
- case kCodeAddr4KiB:
- // ADR uses the aligned PC and as such the offset cannot be calculated early.
- return false;
-
- case kLiteral1KiB:
- case kLiteral4KiB:
- case kLiteral64KiB:
- case kLiteral1MiB:
- case kLiteralFar:
- case kLiteralAddr1KiB:
- case kLiteralAddr4KiB:
- case kLiteralAddr64KiB:
- case kLiteralAddrFar:
- case kLongOrFPLiteral1KiB:
- case kLongOrFPLiteral64KiB:
- case kLongOrFPLiteralFar:
- return false;
- }
-}
-
-uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size) {
- uint32_t old_code_size = current_code_size;
- switch (GetSize()) {
- case kBranch16Bit:
- if (IsInt(cond_ != AL ? 9 : 12, GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kBranch32Bit);
- FALLTHROUGH_INTENDED;
- case kBranch32Bit:
- // We don't support conditional branches beyond +-1MiB
- // or unconditional branches beyond +-16MiB.
- break;
-
- case kCbxz16Bit:
- if (IsUint<7>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kCbxz32Bit);
- FALLTHROUGH_INTENDED;
- case kCbxz32Bit:
- if (IsInt<9>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kCbxz48Bit);
- FALLTHROUGH_INTENDED;
- case kCbxz48Bit:
- // We don't support conditional branches beyond +-1MiB.
- break;
-
- case kCodeAddr4KiB:
- // We don't support Code address ADR beyond +4KiB.
- break;
-
- case kLiteral1KiB:
- DCHECK(!IsHighRegister(rn_));
- if (IsUint<10>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLiteral4KiB);
- FALLTHROUGH_INTENDED;
- case kLiteral4KiB:
- if (IsUint<12>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLiteral64KiB);
- FALLTHROUGH_INTENDED;
- case kLiteral64KiB:
- // Can't handle high register which we can encounter by fall-through from kLiteral4KiB.
- if (!IsHighRegister(rn_) && IsUint<16>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLiteral1MiB);
- FALLTHROUGH_INTENDED;
- case kLiteral1MiB:
- if (IsUint<20>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLiteralFar);
- FALLTHROUGH_INTENDED;
- case kLiteralFar:
- // This encoding can reach any target.
- break;
-
- case kLiteralAddr1KiB:
- DCHECK(!IsHighRegister(rn_));
- if (IsUint<10>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLiteralAddr4KiB);
- FALLTHROUGH_INTENDED;
- case kLiteralAddr4KiB:
- if (IsUint<12>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLiteralAddr64KiB);
- FALLTHROUGH_INTENDED;
- case kLiteralAddr64KiB:
- if (IsUint<16>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLiteralAddrFar);
- FALLTHROUGH_INTENDED;
- case kLiteralAddrFar:
- // This encoding can reach any target.
- break;
-
- case kLongOrFPLiteral1KiB:
- if (IsUint<10>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLongOrFPLiteral64KiB);
- FALLTHROUGH_INTENDED;
- case kLongOrFPLiteral64KiB:
- if (IsUint<16>(GetOffset(current_code_size))) {
- break;
- }
- current_code_size += IncreaseSize(kLongOrFPLiteralFar);
- FALLTHROUGH_INTENDED;
- case kLongOrFPLiteralFar:
- // This encoding can reach any target.
- break;
- }
- return current_code_size - old_code_size;
-}
-
-void Thumb2Assembler::Fixup::Emit(uint32_t emit_location,
- AssemblerBuffer* buffer,
- uint32_t code_size) const {
- switch (GetSize()) {
- case kBranch16Bit: {
- DCHECK(type_ == kUnconditional || type_ == kConditional);
- DCHECK_EQ(type_ == kConditional, cond_ != AL);
- int16_t encoding = BEncoding16(GetOffset(code_size), cond_);
- buffer->Store<int16_t>(emit_location, encoding);
- break;
- }
- case kBranch32Bit: {
- DCHECK(type_ == kConditional || type_ == kUnconditional ||
- type_ == kUnconditionalLink || type_ == kUnconditionalLinkX);
- DCHECK_EQ(type_ == kConditional, cond_ != AL);
- int32_t encoding = BEncoding32(GetOffset(code_size), cond_);
- if (type_ == kUnconditionalLink) {
- DCHECK_NE(encoding & B12, 0);
- encoding |= B14;
- } else if (type_ == kUnconditionalLinkX) {
- DCHECK_NE(encoding & B12, 0);
- encoding ^= B14 | B12;
- }
- buffer->Store<int16_t>(emit_location, encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
- break;
- }
-
- case kCbxz16Bit: {
- DCHECK(type_ == kCompareAndBranchXZero);
- int16_t encoding = CbxzEncoding16(rn_, GetOffset(code_size), cond_);
- buffer->Store<int16_t>(emit_location, encoding);
- break;
- }
- case kCbxz32Bit: {
- DCHECK(type_ == kCompareAndBranchXZero);
- DCHECK(cond_ == EQ || cond_ == NE);
- int16_t cmp_encoding = CmpRnImm8Encoding16(rn_, 0);
- int16_t b_encoding = BEncoding16(GetOffset(code_size), cond_);
- buffer->Store<int16_t>(emit_location, cmp_encoding);
- buffer->Store<int16_t>(emit_location + 2, b_encoding);
- break;
- }
- case kCbxz48Bit: {
- DCHECK(type_ == kCompareAndBranchXZero);
- DCHECK(cond_ == EQ || cond_ == NE);
- int16_t cmp_encoding = CmpRnImm8Encoding16(rn_, 0);
- int32_t b_encoding = BEncoding32(GetOffset(code_size), cond_);
- buffer->Store<int16_t>(emit_location, cmp_encoding);
- buffer->Store<int16_t>(emit_location + 2u, b_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 4u, static_cast<int16_t>(b_encoding & 0xffff));
- break;
- }
-
- case kCodeAddr4KiB: {
- DCHECK(type_ == kLoadCodeAddr);
- int32_t encoding = AdrEncoding32(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(emit_location, encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
- break;
- }
-
- case kLiteral1KiB: {
- DCHECK(type_ == kLoadLiteralNarrow);
- int16_t encoding = LdrLitEncoding16(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(emit_location, encoding);
- break;
- }
- case kLiteral4KiB: {
- DCHECK(type_ == kLoadLiteralNarrow);
- // GetOffset() uses PC+4 but load literal uses AlignDown(PC+4, 4). Adjust offset accordingly.
- int32_t encoding = LdrLitEncoding32(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(emit_location, encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
- break;
- }
- case kLiteral64KiB: {
- DCHECK(type_ == kLoadLiteralNarrow);
- int32_t mov_encoding = MovwEncoding32(rn_, GetOffset(code_size));
- int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
- int16_t ldr_encoding = LdrRtRnImm5Encoding16(rn_, rn_, 0);
- buffer->Store<int16_t>(location_, mov_encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 4u, add_pc_encoding);
- buffer->Store<int16_t>(location_ + 6u, ldr_encoding);
- break;
- }
- case kLiteral1MiB: {
- DCHECK(type_ == kLoadLiteralNarrow);
- int32_t offset = GetOffset(code_size);
- int32_t mov_encoding = MovModImmEncoding32(rn_, offset & ~0xfff);
- int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
- int32_t ldr_encoding = LdrRtRnImm12Encoding(rn_, rn_, offset & 0xfff);
- buffer->Store<int16_t>(emit_location, mov_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 4u, add_pc_encoding);
- buffer->Store<int16_t>(emit_location + 6u, ldr_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
- break;
- }
- case kLiteralFar: {
- DCHECK(type_ == kLoadLiteralNarrow);
- int32_t offset = GetOffset(code_size);
- int32_t movw_encoding = MovwEncoding32(rn_, offset & 0xffff);
- int32_t movt_encoding = MovtEncoding32(rn_, offset & ~0xffff);
- int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
- int32_t ldr_encoding = LdrRtRnImm12Encoding(rn_, rn_, 0);
- buffer->Store<int16_t>(emit_location, movw_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 4u, movt_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 8u, add_pc_encoding);
- buffer->Store<int16_t>(emit_location + 10u, ldr_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
- break;
- }
-
- case kLiteralAddr1KiB: {
- DCHECK(type_ == kLoadLiteralAddr);
- int16_t encoding = AdrEncoding16(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(emit_location, encoding);
- break;
- }
- case kLiteralAddr4KiB: {
- DCHECK(type_ == kLoadLiteralAddr);
- int32_t encoding = AdrEncoding32(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(emit_location, encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
- break;
- }
- case kLiteralAddr64KiB: {
- DCHECK(type_ == kLoadLiteralAddr);
- int32_t mov_encoding = MovwEncoding32(rn_, GetOffset(code_size));
- int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
- buffer->Store<int16_t>(emit_location, mov_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 4u, add_pc_encoding);
- break;
- }
- case kLiteralAddrFar: {
- DCHECK(type_ == kLoadLiteralAddr);
- int32_t offset = GetOffset(code_size);
- int32_t movw_encoding = MovwEncoding32(rn_, offset & 0xffff);
- int32_t movt_encoding = MovtEncoding32(rn_, offset & ~0xffff);
- int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
- buffer->Store<int16_t>(emit_location, movw_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 4u, movt_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 8u, add_pc_encoding);
- break;
- }
-
- case kLongOrFPLiteral1KiB: {
- int32_t encoding = LoadWideOrFpEncoding(PC, GetOffset(code_size)); // DCHECKs type_.
- buffer->Store<int16_t>(emit_location, encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
- break;
- }
- case kLongOrFPLiteral64KiB: {
- int32_t mov_encoding = MovwEncoding32(IP, GetOffset(code_size));
- int16_t add_pc_encoding = AddRdnRmEncoding16(IP, PC);
- int32_t ldr_encoding = LoadWideOrFpEncoding(IP, 0u); // DCHECKs type_.
- buffer->Store<int16_t>(emit_location, mov_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 4u, add_pc_encoding);
- buffer->Store<int16_t>(emit_location + 6u, ldr_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
- break;
- }
- case kLongOrFPLiteralFar: {
- int32_t offset = GetOffset(code_size);
- int32_t movw_encoding = MovwEncoding32(IP, offset & 0xffff);
- int32_t movt_encoding = MovtEncoding32(IP, offset & ~0xffff);
- int16_t add_pc_encoding = AddRdnRmEncoding16(IP, PC);
- int32_t ldr_encoding = LoadWideOrFpEncoding(IP, 0); // DCHECKs type_.
- buffer->Store<int16_t>(emit_location, movw_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 4u, movt_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
- buffer->Store<int16_t>(emit_location + 8u, add_pc_encoding);
- buffer->Store<int16_t>(emit_location + 10u, ldr_encoding >> 16);
- buffer->Store<int16_t>(emit_location + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
- break;
- }
- }
-}
-
-uint16_t Thumb2Assembler::EmitCompareAndBranch(Register rn, uint16_t prev, bool n) {
- CHECK(IsLowRegister(rn));
- uint32_t location = buffer_.Size();
-
- // This is always unresolved as it must be a forward branch.
- Emit16(prev); // Previous link.
- return AddFixup(Fixup::CompareAndBranch(location, rn, n ? NE : EQ));
-}
-
-
-// NOTE: this only supports immediate offsets, not [rx,ry].
-// TODO: support [rx,ry] instructions.
-void Thumb2Assembler::EmitLoadStore(Condition cond,
- bool load,
- bool byte,
- bool half,
- bool is_signed,
- Register rd,
- const Address& ad) {
- CHECK_NE(rd, kNoRegister);
- CheckCondition(cond);
- bool must_be_32bit = force_32bit_;
- if (IsHighRegister(rd)) {
- must_be_32bit = true;
- }
-
- Register rn = ad.GetRegister();
- if (IsHighRegister(rn) && (byte || half || (rn != SP && rn != PC))) {
- must_be_32bit = true;
- }
-
- if (is_signed || ad.GetOffset() < 0 || ad.GetMode() != Address::Offset) {
- must_be_32bit = true;
- }
-
- if (ad.IsImmediate()) {
- // Immediate offset
- int32_t offset = ad.GetOffset();
-
- if (byte) {
- // 5 bit offset, no shift.
- if ((offset & ~0x1f) != 0) {
- must_be_32bit = true;
- }
- } else if (half) {
- // 5 bit offset, shifted by 1.
- if ((offset & ~(0x1f << 1)) != 0) {
- must_be_32bit = true;
- }
- } else if (rn == SP || rn == PC) {
- // The 16 bit SP/PC relative instruction can only have an (imm8 << 2) offset.
- if ((offset & ~(0xff << 2)) != 0) {
- must_be_32bit = true;
- }
- } else {
- // 5 bit offset, shifted by 2.
- if ((offset & ~(0x1f << 2)) != 0) {
- must_be_32bit = true;
- }
- }
-
- if (must_be_32bit) {
- int32_t encoding = B31 | B30 | B29 | B28 | B27 |
- (load ? B20 : 0) |
- (is_signed ? B24 : 0) |
- static_cast<uint32_t>(rd) << 12 |
- ad.encodingThumb(true) |
- (byte ? 0 : half ? B21 : B22);
- Emit32(encoding);
- } else {
- // 16 bit thumb1.
- uint8_t opA = 0;
- bool sp_or_pc_relative = false;
-
- if (byte) {
- opA = 7U /* 0b0111 */;
- } else if (half) {
- opA = 8U /* 0b1000 */;
- } else {
- if (rn == SP) {
- opA = 9U /* 0b1001 */;
- sp_or_pc_relative = true;
- } else if (rn == PC) {
- opA = 4U;
- sp_or_pc_relative = true;
- } else {
- opA = 6U /* 0b0110 */;
- }
- }
- int16_t encoding = opA << 12 |
- (load ? B11 : 0);
-
- CHECK_GE(offset, 0);
- if (sp_or_pc_relative) {
- // SP or PC relative, 10 bit offset.
- CHECK_LT(offset, (1 << 10));
- CHECK_ALIGNED(offset, 4);
- encoding |= rd << 8 | offset >> 2;
- } else {
- // Not SP or PC relative. The offset is shifted right depending on
- // the size of the load/store.
- encoding |= static_cast<uint32_t>(rd);
-
- if (byte) {
- // 5 bit offset, no shift.
- CHECK_LT(offset, (1 << 5));
- } else if (half) {
- // 6 bit offset, shifted by 1.
- CHECK_LT(offset, (1 << 6));
- CHECK_ALIGNED(offset, 2);
- offset >>= 1;
- } else {
- // 7 bit offset, shifted by 2.
- CHECK_LT(offset, (1 << 7));
- CHECK_ALIGNED(offset, 4);
- offset >>= 2;
- }
- encoding |= rn << 3 | offset << 6;
- }
-
- Emit16(encoding);
- }
- } else {
- // Register offset, possibly shifted.
- CHECK_NE(ad.GetRegister(), PC);
- if (ad.GetShiftCount() != 0) {
- // If there is a shift count this must be 32 bit.
- must_be_32bit = true;
- } else if (IsHighRegister(ad.GetRegisterOffset())) {
- must_be_32bit = true;
- }
-
- if (must_be_32bit) {
- int32_t encoding = 0x1f << 27 | (load ? B20 : 0) | static_cast<uint32_t>(rd) << 12 |
- ad.encodingThumb(true);
- if (half) {
- encoding |= B21;
- } else if (!byte) {
- encoding |= B22;
- }
- if (load && is_signed && (byte || half)) {
- encoding |= B24;
- }
- Emit32(encoding);
- } else {
- // 16 bit register offset.
- int32_t encoding = B14 | B12 | (load ? B11 : 0) | static_cast<uint32_t>(rd) |
- ad.encodingThumb(false);
- if (byte) {
- encoding |= B10;
- } else if (half) {
- encoding |= B9;
- }
- Emit16(encoding);
- }
- }
-}
-
-
-void Thumb2Assembler::EmitMultiMemOp(Condition cond,
- BlockAddressMode bam,
- bool load,
- Register base,
- RegList regs) {
- CHECK_NE(base, kNoRegister);
- CheckCondition(cond);
- bool must_be_32bit = force_32bit_;
-
- if (!must_be_32bit && base == SP && bam == (load ? IA_W : DB_W) &&
- (regs & 0xff00 & ~(1 << (load ? PC : LR))) == 0) {
- // Use 16-bit PUSH/POP.
- int16_t encoding = B15 | B13 | B12 | (load ? B11 : 0) | B10 |
- ((regs & (1 << (load ? PC : LR))) != 0 ? B8 : 0) | (regs & 0x00ff);
- Emit16(encoding);
- return;
- }
-
- if ((regs & 0xff00) != 0) {
- must_be_32bit = true;
- }
-
- bool w_bit = bam == IA_W || bam == DB_W || bam == DA_W || bam == IB_W;
- // The 16 bit encoding always uses writeback.
- if (!w_bit) {
- must_be_32bit = true;
- }
-
- if (must_be_32bit) {
- uint32_t op = 0;
- switch (bam) {
- case IA:
- case IA_W:
- op = 1U /* 0b01 */;
- break;
- case DB:
- case DB_W:
- op = 2U /* 0b10 */;
- break;
- case DA:
- case IB:
- case DA_W:
- case IB_W:
- LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
- UNREACHABLE();
- }
- if (load) {
- // Cannot have SP in the list.
- CHECK_EQ((regs & (1 << SP)), 0);
- } else {
- // Cannot have PC or SP in the list.
- CHECK_EQ((regs & (1 << PC | 1 << SP)), 0);
- }
- int32_t encoding = B31 | B30 | B29 | B27 |
- (op << 23) |
- (load ? B20 : 0) |
- base << 16 |
- regs |
- (w_bit << 21);
- Emit32(encoding);
- } else {
- int16_t encoding = B15 | B14 |
- (load ? B11 : 0) |
- base << 8 |
- regs;
- Emit16(encoding);
- }
-}
-
-void Thumb2Assembler::EmitBranch(Condition cond, Label* label, bool link, bool x) {
- bool use32bit = IsForced32Bit() || !CanRelocateBranches();
- uint32_t pc = buffer_.Size();
- Fixup::Type branch_type;
- if (cond == AL) {
- if (link) {
- use32bit = true;
- if (x) {
- branch_type = Fixup::kUnconditionalLinkX; // BLX.
- } else {
- branch_type = Fixup::kUnconditionalLink; // BL.
- }
- } else {
- branch_type = Fixup::kUnconditional; // B.
- // The T2 encoding offset is `SignExtend(imm11:'0', 32)` and there is a PC adjustment of 4.
- static constexpr size_t kMaxT2BackwardDistance = (1u << 11) - 4u;
- if (!use32bit && label->IsBound() && pc - label->Position() > kMaxT2BackwardDistance) {
- use32bit = true;
- }
- }
- } else {
- branch_type = Fixup::kConditional; // B<cond>.
- // The T1 encoding offset is `SignExtend(imm8:'0', 32)` and there is a PC adjustment of 4.
- static constexpr size_t kMaxT1BackwardDistance = (1u << 8) - 4u;
- if (!use32bit && label->IsBound() && pc - label->Position() > kMaxT1BackwardDistance) {
- use32bit = true;
- }
- }
-
- Fixup::Size size = use32bit ? Fixup::kBranch32Bit : Fixup::kBranch16Bit;
- FixupId branch_id = AddFixup(Fixup::Branch(pc, branch_type, size, cond));
-
- if (label->IsBound()) {
- // The branch is to a bound label which means that it's a backwards branch.
- GetFixup(branch_id)->Resolve(label->Position());
- Emit16(0);
- } else {
- // Branch target is an unbound label. Add it to a singly-linked list maintained within
- // the code with the label serving as the head.
- Emit16(static_cast<uint16_t>(label->position_));
- label->LinkTo(branch_id);
- }
-
- if (use32bit) {
- Emit16(0);
- }
- DCHECK_EQ(buffer_.Size() - pc, GetFixup(branch_id)->GetSizeInBytes());
-}
-
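The kMaxT1BackwardDistance/kMaxT2BackwardDistance constants above follow from the signed branch immediates combined with the PC+4 adjustment: the encoded offset is target - (branch_location + 4), so a bound (backward) target at distance d is reachable only if -(d + 4) fits the immediate range. A small standalone check of that arithmetic (a sketch, not ART code):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  constexpr int32_t kT2MinOffset = -(1 << 11);  // B (T2): SignExtend(imm11:'0').
  constexpr int32_t kT1MinOffset = -(1 << 8);   // B<c> (T1): SignExtend(imm8:'0').
  constexpr uint32_t kMaxT2BackwardDistance = (1u << 11) - 4u;  // 2044 bytes.
  constexpr uint32_t kMaxT1BackwardDistance = (1u << 8) - 4u;   // 252 bytes.
  // The farthest reachable backward distance maps exactly onto the most negative offset.
  assert(-(static_cast<int32_t>(kMaxT2BackwardDistance) + 4) == kT2MinOffset);
  assert(-(static_cast<int32_t>(kMaxT1BackwardDistance) + 4) == kT1MinOffset);
  return 0;
}
```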
-
-void Thumb2Assembler::Emit32Miscellaneous(uint8_t op1,
- uint8_t op2,
- uint32_t rest_encoding) {
- int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B23 |
- op1 << 20 |
- 0xf << 12 |
- B7 |
- op2 << 4 |
- rest_encoding;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::Emit16Miscellaneous(uint32_t rest_encoding) {
- int16_t encoding = B15 | B13 | B12 |
- rest_encoding;
- Emit16(encoding);
-}
-
-void Thumb2Assembler::clz(Register rd, Register rm, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CheckCondition(cond);
- CHECK_NE(rd, PC);
- CHECK_NE(rm, PC);
- int32_t encoding =
- static_cast<uint32_t>(rm) << 16 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(rm);
- Emit32Miscellaneous(0b11, 0b00, encoding);
-}
-
-
-void Thumb2Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
- CheckCondition(cond);
- // Always 32 bits, encoding T3. (Other encodings are called MOV, not MOVW.)
- uint32_t imm4 = (imm16 >> 12) & 15U /* 0b1111 */;
- uint32_t i = (imm16 >> 11) & 1U /* 0b1 */;
- uint32_t imm3 = (imm16 >> 8) & 7U /* 0b111 */;
- uint32_t imm8 = imm16 & 0xff;
- int32_t encoding = B31 | B30 | B29 | B28 |
- B25 | B22 |
- static_cast<uint32_t>(rd) << 8 |
- i << 26 |
- imm4 << 16 |
- imm3 << 12 |
- imm8;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
- CheckCondition(cond);
- // Always 32 bits.
- uint32_t imm4 = (imm16 >> 12) & 15U /* 0b1111 */;
- uint32_t i = (imm16 >> 11) & 1U /* 0b1 */;
- uint32_t imm3 = (imm16 >> 8) & 7U /* 0b111 */;
- uint32_t imm8 = imm16 & 0xff;
- int32_t encoding = B31 | B30 | B29 | B28 |
- B25 | B23 | B22 |
- static_cast<uint32_t>(rd) << 8 |
- i << 26 |
- imm4 << 16 |
- imm3 << 12 |
- imm8;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::rbit(Register rd, Register rm, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CheckCondition(cond);
- CHECK_NE(rd, PC);
- CHECK_NE(rm, PC);
- CHECK_NE(rd, SP);
- CHECK_NE(rm, SP);
- int32_t encoding =
- static_cast<uint32_t>(rm) << 16 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(rm);
-
- Emit32Miscellaneous(0b01, 0b10, encoding);
-}
-
-
-void Thumb2Assembler::EmitReverseBytes(Register rd, Register rm,
- uint32_t op) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(rd, PC);
- CHECK_NE(rm, PC);
- CHECK_NE(rd, SP);
- CHECK_NE(rm, SP);
-
- if (!IsHighRegister(rd) && !IsHighRegister(rm) && !force_32bit_) {
- uint16_t t1_op = B11 | B9 | (op << 6);
- int16_t encoding = t1_op |
- static_cast<uint16_t>(rm) << 3 |
- static_cast<uint16_t>(rd);
- Emit16Miscellaneous(encoding);
- } else {
- int32_t encoding =
- static_cast<uint32_t>(rm) << 16 |
- static_cast<uint32_t>(rd) << 8 |
- static_cast<uint32_t>(rm);
- Emit32Miscellaneous(0b01, op, encoding);
- }
-}
-
-
-void Thumb2Assembler::rev(Register rd, Register rm, Condition cond) {
- CheckCondition(cond);
- EmitReverseBytes(rd, rm, 0b00);
-}
-
-
-void Thumb2Assembler::rev16(Register rd, Register rm, Condition cond) {
- CheckCondition(cond);
- EmitReverseBytes(rd, rm, 0b01);
-}
-
-
-void Thumb2Assembler::revsh(Register rd, Register rm, Condition cond) {
- CheckCondition(cond);
- EmitReverseBytes(rd, rm, 0b11);
-}
-
-
-void Thumb2Assembler::ldrex(Register rt, Register rn, uint16_t imm, Condition cond) {
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CheckCondition(cond);
- CHECK_LT(imm, (1u << 10));
-
- int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rt) << 12 |
- 0xf << 8 |
- imm >> 2;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::ldrex(Register rt, Register rn, Condition cond) {
- ldrex(rt, rn, 0, cond);
-}
-
-
-void Thumb2Assembler::strex(Register rd,
- Register rt,
- Register rn,
- uint16_t imm,
- Condition cond) {
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CheckCondition(cond);
- CHECK_LT(imm, (1u << 10));
-
- int32_t encoding = B31 | B30 | B29 | B27 | B22 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rt) << 12 |
- static_cast<uint32_t>(rd) << 8 |
- imm >> 2;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::ldrexd(Register rt, Register rt2, Register rn, Condition cond) {
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt, rt2);
- CheckCondition(cond);
-
- int32_t encoding = B31 | B30 | B29 | B27 | B23 | B22 | B20 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rt) << 12 |
- static_cast<uint32_t>(rt2) << 8 |
- B6 | B5 | B4 | B3 | B2 | B1 | B0;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::strex(Register rd,
- Register rt,
- Register rn,
- Condition cond) {
- strex(rd, rt, rn, 0, cond);
-}
-
-
-void Thumb2Assembler::strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt, rt2);
- CHECK_NE(rd, rt);
- CHECK_NE(rd, rt2);
- CheckCondition(cond);
-
- int32_t encoding = B31 | B30 | B29 | B27 | B23 | B22 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rt) << 12 |
- static_cast<uint32_t>(rt2) << 8 |
- B6 | B5 | B4 |
- static_cast<uint32_t>(rd);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::clrex(Condition cond) {
- CheckCondition(cond);
- int32_t encoding = B31 | B30 | B29 | B28 | B25 | B24 | B23 |
- B21 | B20 |
- 0xf << 16 |
- B15 |
- 0xf << 8 |
- B5 |
- 0xf;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::nop(Condition cond) {
- CheckCondition(cond);
- uint16_t encoding = B15 | B13 | B12 |
- B11 | B10 | B9 | B8;
- Emit16(static_cast<int16_t>(encoding));
-}
-
-
-void Thumb2Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
- CHECK_NE(sn, kNoSRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 |
- ((static_cast<int32_t>(sn) >> 1)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sn) & 1)*B7) | B4;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
- CHECK_NE(sn, kNoSRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B20 |
- ((static_cast<int32_t>(sn) >> 1)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sn) & 1)*B7) | B4;
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
- Condition cond) {
- CHECK_NE(sm, kNoSRegister);
- CHECK_NE(sm, S31);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
- (static_cast<int32_t>(sm) >> 1);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vmovrrs(Register rt, Register rt2, SRegister sm,
- Condition cond) {
- CHECK_NE(sm, kNoSRegister);
- CHECK_NE(sm, S31);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CHECK_NE(rt, rt2);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 | B20 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
- (static_cast<int32_t>(sm) >> 1);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vmovdrr(DRegister dm, Register rt, Register rt2,
- Condition cond) {
- CHECK_NE(dm, kNoDRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
- ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
- (static_cast<int32_t>(dm) & 0xf);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vmovrrd(Register rt, Register rt2, DRegister dm,
- Condition cond) {
- CHECK_NE(dm, kNoDRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CHECK_NE(rt, rt2);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 | B20 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
- ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
- (static_cast<int32_t>(dm) & 0xf);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vldrs(SRegister sd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(sd, kNoSRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 | B20 |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- B11 | B9 | addr.vencoding();
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vstrs(SRegister sd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(static_cast<Register>(addr.encodingArm() & (0xf << kRnShift)), PC);
- CHECK_NE(sd, kNoSRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- B11 | B9 | addr.vencoding();
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vldrd(DRegister dd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(dd, kNoDRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 | B20 |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- B11 | B9 | B8 | addr.vencoding();
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vstrd(DRegister dd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(static_cast<Register>(addr.encodingArm() & (0xf << kRnShift)), PC);
- CHECK_NE(dd, kNoDRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- B11 | B9 | B8 | addr.vencoding();
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vpushs(SRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, true, false, cond);
-}
-
-
-void Thumb2Assembler::vpushd(DRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, true, true, cond);
-}
-
-
-void Thumb2Assembler::vpops(SRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, false, false, cond);
-}
-
-
-void Thumb2Assembler::vpopd(DRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, false, true, cond);
-}
-
-
-void Thumb2Assembler::vldmiad(Register base_reg, DRegister reg, int nregs, Condition cond) {
- int32_t rest = B23;
- EmitVLdmOrStm(rest,
- static_cast<uint32_t>(reg),
- nregs,
- base_reg,
- /*is_load*/ true,
- /*dbl*/ true,
- cond);
-}
-
-
-void Thumb2Assembler::vstmiad(Register base_reg, DRegister reg, int nregs, Condition cond) {
- int32_t rest = B23;
- EmitVLdmOrStm(rest,
- static_cast<uint32_t>(reg),
- nregs,
- base_reg,
- /*is_load*/ false,
- /*dbl*/ true,
- cond);
-}
-
-
-void Thumb2Assembler::EmitVPushPop(uint32_t reg, int nregs, bool push, bool dbl, Condition cond) {
- int32_t rest = B21 | (push ? B24 : B23);
- EmitVLdmOrStm(rest, reg, nregs, SP, /*is_load*/ !push, dbl, cond);
-}
-
-
-void Thumb2Assembler::EmitVLdmOrStm(int32_t rest,
- uint32_t reg,
- int nregs,
- Register rn,
- bool is_load,
- bool dbl,
- Condition cond) {
- CheckCondition(cond);
-
- DCHECK_GT(nregs, 0);
- DCHECK_LE(reg + nregs, 32u);
- DCHECK(!dbl || (nregs <= 16));
-
- uint32_t D;
- uint32_t Vd;
- if (dbl) {
- // Encoded as D:Vd.
- D = (reg >> 4) & 1;
- Vd = reg & 15U /* 0b1111 */;
- } else {
- // Encoded as Vd:D.
- D = reg & 1;
- Vd = (reg >> 1) & 15U /* 0b1111 */;
- }
-
- int32_t encoding = rest |
- 14U /* 0b1110 */ << 28 |
- B27 | B26 | B11 | B9 |
- (is_load ? B20 : 0) |
- static_cast<int16_t>(rn) << 16 |
- D << 22 |
- Vd << 12 |
- (dbl ? B8 : 0) |
- nregs << (dbl ? 1 : 0);
-
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::EmitVFPsss(Condition cond, int32_t opcode,
- SRegister sd, SRegister sn, SRegister sm) {
- CHECK_NE(sd, kNoSRegister);
- CHECK_NE(sn, kNoSRegister);
- CHECK_NE(sm, kNoSRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | opcode |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sn) >> 1)*B16) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- ((static_cast<int32_t>(sn) & 1)*B7) |
- ((static_cast<int32_t>(sm) & 1)*B5) |
- (static_cast<int32_t>(sm) >> 1);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::EmitVFPddd(Condition cond, int32_t opcode,
- DRegister dd, DRegister dn, DRegister dm) {
- CHECK_NE(dd, kNoDRegister);
- CHECK_NE(dn, kNoDRegister);
- CHECK_NE(dm, kNoDRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | B8 | opcode |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dn) & 0xf)*B16) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- ((static_cast<int32_t>(dn) >> 4)*B7) |
- ((static_cast<int32_t>(dm) >> 4)*B5) |
- (static_cast<int32_t>(dm) & 0xf);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::EmitVFPsd(Condition cond, int32_t opcode,
- SRegister sd, DRegister dm) {
- CHECK_NE(sd, kNoSRegister);
- CHECK_NE(dm, kNoDRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | opcode |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- ((static_cast<int32_t>(dm) >> 4)*B5) |
- (static_cast<int32_t>(dm) & 0xf);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::EmitVFPds(Condition cond, int32_t opcode,
- DRegister dd, SRegister sm) {
- CHECK_NE(dd, kNoDRegister);
- CHECK_NE(sm, kNoSRegister);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | opcode |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- ((static_cast<int32_t>(sm) & 1)*B5) |
- (static_cast<int32_t>(sm) >> 1);
- Emit32(encoding);
-}
-
-
-void Thumb2Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR.
- CHECK_NE(cond, kNoCondition);
- CheckCondition(cond);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
- (static_cast<int32_t>(PC)*B12) |
- B11 | B9 | B4;
- Emit32(encoding);
-}
-
-void Thumb2Assembler::vcntd(DRegister dd, DRegister dm) {
- uint32_t encoding = (B31 | B30 | B29 | B28 | B27 | B26 | B25 | B24 | B23 | B21 | B20) |
- ((static_cast<int32_t>(dd) >> 4) * B22) |
- ((static_cast<uint32_t>(dd) & 0xf) * B12) |
- (B10 | B8) |
- ((static_cast<int32_t>(dm) >> 4) * B5) |
- (static_cast<uint32_t>(dm) & 0xf);
-
- Emit32(encoding);
-}
-
-void Thumb2Assembler::vpaddld(DRegister dd, DRegister dm, int32_t size, bool is_unsigned) {
- CHECK(size == 8 || size == 16 || size == 32) << size;
- uint32_t encoding = (B31 | B30 | B29 | B28 | B27 | B26 | B25 | B24 | B23 | B21 | B20) |
- ((static_cast<uint32_t>(size >> 4) & 0x3) * B18) |
- ((static_cast<int32_t>(dd) >> 4) * B22) |
- ((static_cast<uint32_t>(dd) & 0xf) * B12) |
- (B9) |
- (is_unsigned ? B7 : 0) |
- ((static_cast<int32_t>(dm) >> 4) * B5) |
- (static_cast<uint32_t>(dm) & 0xf);
-
- Emit32(encoding);
-}
-
-void Thumb2Assembler::svc(uint32_t imm8) {
- CHECK(IsUint<8>(imm8)) << imm8;
- int16_t encoding = B15 | B14 | B12 |
- B11 | B10 | B9 | B8 |
- imm8;
- Emit16(encoding);
-}
-
-
-void Thumb2Assembler::bkpt(uint16_t imm8) {
- CHECK(IsUint<8>(imm8)) << imm8;
- int16_t encoding = B15 | B13 | B12 |
- B11 | B10 | B9 |
- imm8;
- Emit16(encoding);
-}
-
-// Convert the given IT state to a mask bit given bit 0 of the first
-// condition and a shift position.
-static uint8_t ToItMask(ItState s, uint8_t firstcond0, uint8_t shift) {
- switch (s) {
- case kItOmitted: return 1 << shift;
- case kItThen: return firstcond0 << shift;
- case kItElse: return !firstcond0 << shift;
- }
- return 0;
-}
-
-
-// Set the IT condition in the given position for the given state. This is used
-// to check that conditional instructions match the preceding IT statement.
-void Thumb2Assembler::SetItCondition(ItState s, Condition cond, uint8_t index) {
- switch (s) {
- case kItOmitted: it_conditions_[index] = AL; break;
- case kItThen: it_conditions_[index] = cond; break;
- case kItElse:
- it_conditions_[index] = static_cast<Condition>(static_cast<uint8_t>(cond) ^ 1);
- break;
- }
-}
-
-
-void Thumb2Assembler::it(Condition firstcond, ItState i1, ItState i2, ItState i3) {
- CheckCondition(AL); // Not allowed in IT block.
- uint8_t firstcond0 = static_cast<uint8_t>(firstcond) & 1;
-
- // All conditions to AL.
- for (uint8_t i = 0; i < 4; ++i) {
- it_conditions_[i] = AL;
- }
-
- SetItCondition(kItThen, firstcond, 0);
- uint8_t mask = ToItMask(i1, firstcond0, 3);
- SetItCondition(i1, firstcond, 1);
-
- if (i1 != kItOmitted) {
- mask |= ToItMask(i2, firstcond0, 2);
- SetItCondition(i2, firstcond, 2);
- if (i2 != kItOmitted) {
- mask |= ToItMask(i3, firstcond0, 1);
- SetItCondition(i3, firstcond, 3);
- if (i3 != kItOmitted) {
- mask |= 1U /* 0b0001 */;
- }
- }
- }
-
- // Start at first condition.
- it_cond_index_ = 0;
- next_condition_ = it_conditions_[0];
- uint16_t encoding = B15 | B13 | B12 |
- B11 | B10 | B9 | B8 |
- firstcond << 4 |
- mask;
- Emit16(encoding);
-}
-
-
-void Thumb2Assembler::cbz(Register rn, Label* label) {
- CheckCondition(AL);
- if (label->IsBound()) {
- LOG(FATAL) << "cbz can only be used to branch forwards";
- UNREACHABLE();
- } else if (IsHighRegister(rn)) {
- LOG(FATAL) << "cbz can only be used with low registers";
- UNREACHABLE();
- } else {
- uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), false);
- label->LinkTo(branchid);
- }
-}
-
-
-void Thumb2Assembler::cbnz(Register rn, Label* label) {
- CheckCondition(AL);
- if (label->IsBound()) {
- LOG(FATAL) << "cbnz can only be used to branch forwards";
- UNREACHABLE();
- } else if (IsHighRegister(rn)) {
- LOG(FATAL) << "cbnz can only be used with low registers";
- UNREACHABLE();
- } else {
- uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), true);
- label->LinkTo(branchid);
- }
-}
-
-
-void Thumb2Assembler::blx(Register rm, Condition cond) {
- CHECK_NE(rm, kNoRegister);
- CheckCondition(cond);
- int16_t encoding = B14 | B10 | B9 | B8 | B7 | static_cast<int16_t>(rm) << 3;
- Emit16(encoding);
-}
-
-
-void Thumb2Assembler::bx(Register rm, Condition cond) {
- CHECK_NE(rm, kNoRegister);
- CheckCondition(cond);
- int16_t encoding = B14 | B10 | B9 | B8 | static_cast<int16_t>(rm) << 3;
- Emit16(encoding);
-}
-
-
-void Thumb2Assembler::AdrCode(Register rt, Label* label) {
- uint32_t pc = buffer_.Size();
- FixupId branch_id = AddFixup(Fixup::LoadCodeAddress(pc, rt));
- CHECK(!label->IsBound());
- // ADR target must be an unbound label. Add it to a singly-linked list maintained within
- // the code with the label serving as the head.
- Emit16(static_cast<uint16_t>(label->position_));
- label->LinkTo(branch_id);
- Emit16(0);
- DCHECK_EQ(buffer_.Size() - pc, GetFixup(branch_id)->GetSizeInBytes());
-}
-
-
-void Thumb2Assembler::Push(Register rd, Condition cond) {
- str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
-}
-
-
-void Thumb2Assembler::Pop(Register rd, Condition cond) {
- ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
-}
-
-
-void Thumb2Assembler::PushList(RegList regs, Condition cond) {
- stm(DB_W, SP, regs, cond);
-}
-
-
-void Thumb2Assembler::PopList(RegList regs, Condition cond) {
- ldm(IA_W, SP, regs, cond);
-}
-
-void Thumb2Assembler::StoreList(RegList regs, size_t stack_offset) {
- DCHECK_NE(regs, 0u);
- DCHECK_EQ(regs & (1u << IP), 0u);
- if (IsPowerOfTwo(regs)) {
- Register reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs)));
- str(reg, Address(SP, stack_offset));
- } else {
- add(IP, SP, ShifterOperand(stack_offset));
- stm(IA, IP, regs);
- }
-}
-
-void Thumb2Assembler::LoadList(RegList regs, size_t stack_offset) {
- DCHECK_NE(regs, 0u);
- DCHECK_EQ(regs & (1u << IP), 0u);
- if (IsPowerOfTwo(regs)) {
- Register reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs)));
- ldr(reg, Address(SP, stack_offset));
- } else {
- Register lowest_reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs)));
- add(lowest_reg, SP, ShifterOperand(stack_offset));
- ldm(IA, lowest_reg, regs);
- }
-}
-
-void Thumb2Assembler::Mov(Register rd, Register rm, Condition cond) {
- if (cond != AL || rd != rm) {
- mov(rd, ShifterOperand(rm), cond);
- }
-}
-
-
-void Thumb2Assembler::Bind(Label* label) {
- BindLabel(label, buffer_.Size());
-
- // Try to emit some Fixups now to reduce the memory needed during the branch fixup later.
- while (!fixups_.empty() && fixups_.back().IsCandidateForEmitEarly()) {
- const Fixup& last_fixup = fixups_.back();
- // Fixups are ordered by location, so the candidate can surely be emitted if it is
- // a forward branch. If it's a backward branch, it may go over any number of other
- // fixups. We could check for any number of emit-early candidates, but we want this
- // heuristic to be quick, so we check just one.
- uint32_t target = last_fixup.GetTarget();
- if (target < last_fixup.GetLocation() &&
- fixups_.size() >= 2u &&
- fixups_[fixups_.size() - 2u].GetLocation() >= target) {
- const Fixup& prev_fixup = fixups_[fixups_.size() - 2u];
- if (!prev_fixup.IsCandidateForEmitEarly()) {
- break;
- }
- uint32_t min_target = std::min(target, prev_fixup.GetTarget());
- if (fixups_.size() >= 3u && fixups_[fixups_.size() - 3u].GetLocation() >= min_target) {
- break;
- }
- }
- last_fixup.Emit(last_fixup.GetLocation(), &buffer_, buffer_.Size());
- fixups_.pop_back();
- }
-}
-
-
-void Thumb2Assembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK_LE(shift_imm, 31u);
- CheckCondition(cond);
- EmitShift(rd, rm, LSL, shift_imm, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK(1u <= shift_imm && shift_imm <= 32u);
- if (shift_imm == 32) shift_imm = 0; // Comply with UAL syntax.
- CheckCondition(cond);
- EmitShift(rd, rm, LSR, shift_imm, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Asr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK(1u <= shift_imm && shift_imm <= 32u);
- if (shift_imm == 32) shift_imm = 0; // Comply with UAL syntax.
- CheckCondition(cond);
- EmitShift(rd, rm, ASR, shift_imm, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Ror(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK(1u <= shift_imm && shift_imm <= 31u);
- CheckCondition(cond);
- EmitShift(rd, rm, ROR, shift_imm, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Rrx(Register rd, Register rm, Condition cond, SetCc set_cc) {
- CheckCondition(cond);
- EmitShift(rd, rm, RRX, 0, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Lsl(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- CheckCondition(cond);
- EmitShift(rd, rm, LSL, rn, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Lsr(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- CheckCondition(cond);
- EmitShift(rd, rm, LSR, rn, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Asr(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- CheckCondition(cond);
- EmitShift(rd, rm, ASR, rn, cond, set_cc);
-}
-
-
-void Thumb2Assembler::Ror(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- CheckCondition(cond);
- EmitShift(rd, rm, ROR, rn, cond, set_cc);
-}
-
-
-int32_t Thumb2Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
- // The offset is off by 4 due to the way the ARM CPUs read PC.
- offset -= 4;
- offset >>= 1;
-
- uint32_t value = 0;
- // There are two different encodings depending on the value of bit 12. In one case
- // intermediate values are calculated using the sign bit.
- if ((inst & B12) == B12) {
- // 25 bits of offset.
- uint32_t signbit = (offset >> 31) & 0x1;
- uint32_t i1 = (offset >> 22) & 0x1;
- uint32_t i2 = (offset >> 21) & 0x1;
- uint32_t imm10 = (offset >> 11) & 0x03ff;
- uint32_t imm11 = offset & 0x07ff;
- uint32_t j1 = (i1 ^ signbit) ? 0 : 1;
- uint32_t j2 = (i2 ^ signbit) ? 0 : 1;
- value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) |
- imm11;
- // Remove the offset from the current encoding.
- inst &= ~(0x3ff << 16 | 0x7ff);
- } else {
- uint32_t signbit = (offset >> 31) & 0x1;
- uint32_t imm6 = (offset >> 11) & 0x03f;
- uint32_t imm11 = offset & 0x07ff;
- uint32_t j1 = (offset >> 19) & 1;
- uint32_t j2 = (offset >> 17) & 1;
- value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm6 << 16) |
- imm11;
- // Remove the offset from the current encoding.
- inst &= ~(0x3f << 16 | 0x7ff);
- }
- // Mask out offset bits in current instruction.
- inst &= ~(B26 | B13 | B11);
- inst |= value;
- return inst;
-}
-
-
-int Thumb2Assembler::DecodeBranchOffset(int32_t instr) {
- int32_t imm32;
- if ((instr & B12) == B12) {
- uint32_t S = (instr >> 26) & 1;
- uint32_t J2 = (instr >> 11) & 1;
- uint32_t J1 = (instr >> 13) & 1;
- uint32_t imm10 = (instr >> 16) & 0x3FF;
- uint32_t imm11 = instr & 0x7FF;
-
- uint32_t I1 = ~(J1 ^ S) & 1;
- uint32_t I2 = ~(J2 ^ S) & 1;
- imm32 = (S << 24) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1);
- imm32 = (imm32 << 8) >> 8; // sign extend 24 bit immediate.
- } else {
- uint32_t S = (instr >> 26) & 1;
- uint32_t J2 = (instr >> 11) & 1;
- uint32_t J1 = (instr >> 13) & 1;
- uint32_t imm6 = (instr >> 16) & 0x3F;
- uint32_t imm11 = instr & 0x7FF;
-
- imm32 = (S << 20) | (J2 << 19) | (J1 << 18) | (imm6 << 12) | (imm11 << 1);
- imm32 = (imm32 << 11) >> 11; // sign extend 21 bit immediate.
- }
- imm32 += 4;
- return imm32;
-}
-
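EncodeBranchOffset() and DecodeBranchOffset() mirror each other because the 25-bit T4 encoding stores J1/J2 as the offset bits I1/I2 XNORed with the sign bit. A tiny standalone sketch of that mapping (helper names are hypothetical, not ART API):

```cpp
#include <cassert>
#include <cstdint>

uint32_t PackJBit(uint32_t i_bit, uint32_t sign_bit) {
  return ((i_bit ^ sign_bit) & 1u) ^ 1u;  // Jn = NOT(In XOR S).
}

uint32_t UnpackIBit(uint32_t j_bit, uint32_t sign_bit) {
  return ((j_bit ^ sign_bit) & 1u) ^ 1u;  // In = NOT(Jn XOR S).
}

int main() {
  for (uint32_t s = 0; s != 2; ++s) {
    for (uint32_t i = 0; i != 2; ++i) {
      assert(UnpackIBit(PackJBit(i, s), s) == i);  // The mapping round-trips for every bit pattern.
    }
  }
  return 0;
}
```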
-uint32_t Thumb2Assembler::GetAdjustedPosition(uint32_t old_position) {
- // We can reconstruct the adjustment by going through all the fixups from the beginning
- // up to the old_position. Since we expect GetAdjustedPosition() to be called in a loop
- // with increasing old_position, we can use the data from the last call to continue where
- // we left off, and the whole loop should be O(m+n), where m is the number of positions
- // to adjust and n is the number of fixups.
- if (old_position < last_old_position_) {
- last_position_adjustment_ = 0u;
- last_old_position_ = 0u;
- last_fixup_id_ = 0u;
- }
- while (last_fixup_id_ != fixups_.size()) {
- Fixup* fixup = GetFixup(last_fixup_id_);
- if (fixup->GetLocation() >= old_position + last_position_adjustment_) {
- break;
- }
- if (fixup->GetSize() != fixup->GetOriginalSize()) {
- last_position_adjustment_ += fixup->GetSizeInBytes() - fixup->GetOriginalSizeInBytes();
- }
- ++last_fixup_id_;
- }
- last_old_position_ = old_position;
- return old_position + last_position_adjustment_;
-}
-
-Literal* Thumb2Assembler::NewLiteral(size_t size, const uint8_t* data) {
- DCHECK(size == 4u || size == 8u) << size;
- literals_.emplace_back(size, data);
- return &literals_.back();
-}
-
-void Thumb2Assembler::LoadLiteral(Register rt, Literal* literal) {
- DCHECK_EQ(literal->GetSize(), 4u);
- DCHECK(!literal->GetLabel()->IsBound());
- bool use32bit = IsForced32Bit() || IsHighRegister(rt);
- uint32_t location = buffer_.Size();
- Fixup::Size size = use32bit ? Fixup::kLiteral4KiB : Fixup::kLiteral1KiB;
- FixupId fixup_id = AddFixup(Fixup::LoadNarrowLiteral(location, rt, size));
- Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
- literal->GetLabel()->LinkTo(fixup_id);
- if (use32bit) {
- Emit16(0);
- }
- DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
-}
-
-void Thumb2Assembler::LoadLiteral(Register rt, Register rt2, Literal* literal) {
- DCHECK_EQ(literal->GetSize(), 8u);
- DCHECK(!literal->GetLabel()->IsBound());
- uint32_t location = buffer_.Size();
- FixupId fixup_id =
- AddFixup(Fixup::LoadWideLiteral(location, rt, rt2, Fixup::kLongOrFPLiteral1KiB));
- Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
- literal->GetLabel()->LinkTo(fixup_id);
- Emit16(0);
- DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
-}
-
-void Thumb2Assembler::LoadLiteral(SRegister sd, Literal* literal) {
- DCHECK_EQ(literal->GetSize(), 4u);
- DCHECK(!literal->GetLabel()->IsBound());
- uint32_t location = buffer_.Size();
- FixupId fixup_id = AddFixup(Fixup::LoadSingleLiteral(location, sd, Fixup::kLongOrFPLiteral1KiB));
- Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
- literal->GetLabel()->LinkTo(fixup_id);
- Emit16(0);
- DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
-}
-
-void Thumb2Assembler::LoadLiteral(DRegister dd, Literal* literal) {
- DCHECK_EQ(literal->GetSize(), 8u);
- DCHECK(!literal->GetLabel()->IsBound());
- uint32_t location = buffer_.Size();
- FixupId fixup_id = AddFixup(Fixup::LoadDoubleLiteral(location, dd, Fixup::kLongOrFPLiteral1KiB));
- Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
- literal->GetLabel()->LinkTo(fixup_id);
- Emit16(0);
- DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
-}
-
-
-void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
- Condition cond, SetCc set_cc) {
- if (value == 0 && set_cc != kCcSet) {
- if (rd != rn) {
- mov(rd, ShifterOperand(rn), cond);
- }
- return;
- }
- // We prefer to select the shorter code sequence rather than selecting add for
- // positive values and sub for negative ones, which would slightly improve
- // the readability of generated code for some constants.
- ShifterOperand shifter_op;
- if (ShifterOperandCanHold(rd, rn, ADD, value, set_cc, &shifter_op)) {
- add(rd, rn, shifter_op, cond, set_cc);
- } else if (ShifterOperandCanHold(rd, rn, SUB, -value, set_cc, &shifter_op)) {
- sub(rd, rn, shifter_op, cond, set_cc);
- } else {
- CHECK(rn != IP);
- // If rd != rn, use rd as temp. This allows 16-bit ADD/SUB in more situations than using IP.
- Register temp = (rd != rn) ? rd : IP;
- if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~value, kCcKeep, &shifter_op)) {
- mvn(temp, shifter_op, cond, kCcKeep);
- add(rd, rn, ShifterOperand(temp), cond, set_cc);
- } else if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~(-value), kCcKeep, &shifter_op)) {
- mvn(temp, shifter_op, cond, kCcKeep);
- sub(rd, rn, ShifterOperand(temp), cond, set_cc);
- } else if (High16Bits(-value) == 0) {
- movw(temp, Low16Bits(-value), cond);
- sub(rd, rn, ShifterOperand(temp), cond, set_cc);
- } else {
- movw(temp, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(temp, value_high, cond);
- }
- add(rd, rn, ShifterOperand(temp), cond, set_cc);
- }
- }
-}
-
-void Thumb2Assembler::CmpConstant(Register rn, int32_t value, Condition cond) {
- // We prefer to select the shorter code sequence rather than using plain cmp and cmn,
- // which would slightly improve the readability of generated code for some constants.
- ShifterOperand shifter_op;
- if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, kCcSet, &shifter_op)) {
- cmp(rn, shifter_op, cond);
- } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, -value, kCcSet, &shifter_op)) {
- cmn(rn, shifter_op, cond);
- } else {
- CHECK(rn != IP);
- if (ShifterOperandCanHold(IP, kNoRegister, MVN, ~value, kCcKeep, &shifter_op)) {
- mvn(IP, shifter_op, cond, kCcKeep);
- cmp(rn, ShifterOperand(IP), cond);
- } else if (ShifterOperandCanHold(IP, kNoRegister, MVN, ~(-value), kCcKeep, &shifter_op)) {
- mvn(IP, shifter_op, cond, kCcKeep);
- cmn(rn, ShifterOperand(IP), cond);
- } else if (High16Bits(-value) == 0) {
- movw(IP, Low16Bits(-value), cond);
- cmn(rn, ShifterOperand(IP), cond);
- } else {
- movw(IP, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(IP, value_high, cond);
- }
- cmp(rn, ShifterOperand(IP), cond);
- }
- }
-}
-
-void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
- ShifterOperand shifter_op;
- if (ShifterOperandCanHold(rd, R0, MOV, value, &shifter_op)) {
- mov(rd, shifter_op, cond);
- } else if (ShifterOperandCanHold(rd, R0, MVN, ~value, &shifter_op)) {
- mvn(rd, shifter_op, cond);
- } else {
- movw(rd, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(rd, value_high, cond);
- }
- }
-}
-
-void Thumb2Assembler::LoadDImmediate(DRegister dd, double value, Condition cond) {
- if (!vmovd(dd, value, cond)) {
- uint64_t int_value = bit_cast<uint64_t, double>(value);
- if (int_value == bit_cast<uint64_t, double>(0.0)) {
- // 0.0 is quite common, so we special case it by loading
- // 2.0 in `dd` and then subtracting it from itself.
- bool success = vmovd(dd, 2.0, cond);
- CHECK(success);
- vsubd(dd, dd, dd, cond);
- } else {
- Literal* literal = literal64_dedupe_map_.GetOrCreate(
- int_value,
- [this, int_value]() { return NewLiteral<uint64_t>(int_value); });
- LoadLiteral(dd, literal);
- }
- }
-}
-
-int32_t Thumb2Assembler::GetAllowedLoadOffsetBits(LoadOperandType type) {
- switch (type) {
- case kLoadSignedByte:
- case kLoadSignedHalfword:
- case kLoadUnsignedHalfword:
- case kLoadUnsignedByte:
- case kLoadWord:
- // We can encode imm12 offset.
- return 0xfffu;
- case kLoadSWord:
- case kLoadDWord:
- case kLoadWordPair:
- // We can encode imm8:'00' offset.
- return 0xff << 2;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-int32_t Thumb2Assembler::GetAllowedStoreOffsetBits(StoreOperandType type) {
- switch (type) {
- case kStoreHalfword:
- case kStoreByte:
- case kStoreWord:
- // We can encode imm12 offset.
- return 0xfff;
- case kStoreSWord:
- case kStoreDWord:
- case kStoreWordPair:
- // We can encode imm8:'00' offset.
- return 0xff << 2;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-bool Thumb2Assembler::CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
- int32_t offset,
- /*out*/ int32_t* add_to_base,
- /*out*/ int32_t* offset_for_load_store) {
- int32_t other_bits = offset & ~allowed_offset_bits;
- if (ShifterOperandCanAlwaysHold(other_bits) || ShifterOperandCanAlwaysHold(-other_bits)) {
- *add_to_base = offset & ~allowed_offset_bits;
- *offset_for_load_store = offset & allowed_offset_bits;
- return true;
- }
- return false;
-}
-
-int32_t Thumb2Assembler::AdjustLoadStoreOffset(int32_t allowed_offset_bits,
- Register temp,
- Register base,
- int32_t offset,
- Condition cond) {
- DCHECK_NE(offset & ~allowed_offset_bits, 0);
- int32_t add_to_base, offset_for_load;
- if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
- AddConstant(temp, base, add_to_base, cond, kCcKeep);
- return offset_for_load;
- } else {
- LoadImmediate(temp, offset, cond);
- add(temp, temp, ShifterOperand(base), cond, kCcKeep);
- return 0;
- }
-}
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldLoadOffsetThumb.
-void Thumb2Assembler::LoadFromOffset(LoadOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldLoadOffsetThumb(type, offset)) {
- CHECK_NE(base, IP);
- // Inlined AdjustLoadStoreOffset() allows us to pull a few more tricks.
- int32_t allowed_offset_bits = GetAllowedLoadOffsetBits(type);
- DCHECK_NE(offset & ~allowed_offset_bits, 0);
- int32_t add_to_base, offset_for_load;
- if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
- // Use reg for the adjusted base. If it's low reg, we may end up using 16-bit load.
- AddConstant(reg, base, add_to_base, cond, kCcKeep);
- base = reg;
- offset = offset_for_load;
- } else {
- Register temp = (reg == base) ? IP : reg;
- LoadImmediate(temp, offset, cond);
- // TODO: Implement indexed load (not available for LDRD) and use it here to avoid the ADD.
- // Use reg for the adjusted base. If it's low reg, we may end up using 16-bit load.
- add(reg, reg, ShifterOperand((reg == base) ? IP : base), cond, kCcKeep);
- base = reg;
- offset = 0;
- }
- }
- DCHECK(Address::CanHoldLoadOffsetThumb(type, offset));
- switch (type) {
- case kLoadSignedByte:
- ldrsb(reg, Address(base, offset), cond);
- break;
- case kLoadUnsignedByte:
- ldrb(reg, Address(base, offset), cond);
- break;
- case kLoadSignedHalfword:
- ldrsh(reg, Address(base, offset), cond);
- break;
- case kLoadUnsignedHalfword:
- ldrh(reg, Address(base, offset), cond);
- break;
- case kLoadWord:
- ldr(reg, Address(base, offset), cond);
- break;
- case kLoadWordPair:
- ldrd(reg, Address(base, offset), cond);
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldLoadOffsetThumb, as expected by JIT::GuardedLoadFromOffset.
-void Thumb2Assembler::LoadSFromOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldLoadOffsetThumb(kLoadSWord, offset)) {
- CHECK_NE(base, IP);
- offset = AdjustLoadStoreOffset(GetAllowedLoadOffsetBits(kLoadSWord), IP, base, offset, cond);
- base = IP;
- }
- DCHECK(Address::CanHoldLoadOffsetThumb(kLoadSWord, offset));
- vldrs(reg, Address(base, offset), cond);
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldLoadOffsetThumb, as expected by JIT::GuardedLoadFromOffset.
-void Thumb2Assembler::LoadDFromOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldLoadOffsetThumb(kLoadDWord, offset)) {
- CHECK_NE(base, IP);
- offset = AdjustLoadStoreOffset(GetAllowedLoadOffsetBits(kLoadDWord), IP, base, offset, cond);
- base = IP;
- }
- DCHECK(Address::CanHoldLoadOffsetThumb(kLoadDWord, offset));
- vldrd(reg, Address(base, offset), cond);
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldStoreOffsetThumb.
-void Thumb2Assembler::StoreToOffset(StoreOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond) {
- Register tmp_reg = kNoRegister;
- if (!Address::CanHoldStoreOffsetThumb(type, offset)) {
- CHECK_NE(base, IP);
- if ((reg != IP) &&
- ((type != kStoreWordPair) || (reg + 1 != IP))) {
- tmp_reg = IP;
- } else {
- // Be careful not to use IP twice (for `reg` (or `reg` + 1 in
- // the case of a word-pair store) and `base`) to build the
- // Address object used by the store instruction(s) below.
- // Instead, save R5 on the stack (or R6 if R5 is already used by
- // `base`), use it as secondary temporary register, and restore
- // it after the store instruction has been emitted.
- tmp_reg = (base != R5) ? R5 : R6;
- Push(tmp_reg);
- if (base == SP) {
- offset += kRegisterSize;
- }
- }
- // TODO: Implement indexed store (not available for STRD), inline AdjustLoadStoreOffset()
- // and in the "unsplittable" path get rid of the "add" by using the store indexed instead.
- offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(type), tmp_reg, base, offset, cond);
- base = tmp_reg;
- }
- DCHECK(Address::CanHoldStoreOffsetThumb(type, offset));
- switch (type) {
- case kStoreByte:
- strb(reg, Address(base, offset), cond);
- break;
- case kStoreHalfword:
- strh(reg, Address(base, offset), cond);
- break;
- case kStoreWord:
- str(reg, Address(base, offset), cond);
- break;
- case kStoreWordPair:
- strd(reg, Address(base, offset), cond);
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
- if ((tmp_reg != kNoRegister) && (tmp_reg != IP)) {
- CHECK((tmp_reg == R5) || (tmp_reg == R6));
- Pop(tmp_reg);
- }
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldStoreOffsetThumb, as expected by JIT::GuardedStoreToOffset.
-void Thumb2Assembler::StoreSToOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldStoreOffsetThumb(kStoreSWord, offset)) {
- CHECK_NE(base, IP);
- offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(kStoreSWord), IP, base, offset, cond);
- base = IP;
- }
- DCHECK(Address::CanHoldStoreOffsetThumb(kStoreSWord, offset));
- vstrs(reg, Address(base, offset), cond);
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldStoreOffsetThumb, as expected by JIT::GuardedStoreSToOffset.
-void Thumb2Assembler::StoreDToOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldStoreOffsetThumb(kStoreDWord, offset)) {
- CHECK_NE(base, IP);
- offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(kStoreDWord), IP, base, offset, cond);
- base = IP;
- }
- DCHECK(Address::CanHoldStoreOffsetThumb(kStoreDWord, offset));
- vstrd(reg, Address(base, offset), cond);
-}
-
-
-void Thumb2Assembler::dmb(DmbOptions flavor) {
- int32_t encoding = 0xf3bf8f50; // dmb in T1 encoding.
- Emit32(encoding | flavor);
-}
-
-
-void Thumb2Assembler::CompareAndBranchIfZero(Register r, Label* label) {
- if (CanRelocateBranches() && IsLowRegister(r) && !label->IsBound()) {
- cbz(r, label);
- } else {
- cmp(r, ShifterOperand(0));
- b(label, EQ);
- }
-}
-
-
-void Thumb2Assembler::CompareAndBranchIfNonZero(Register r, Label* label) {
- if (CanRelocateBranches() && IsLowRegister(r) && !label->IsBound()) {
- cbnz(r, label);
- } else {
- cmp(r, ShifterOperand(0));
- b(label, NE);
- }
-}
-
-JumpTable* Thumb2Assembler::CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) {
- jump_tables_.emplace_back(std::move(labels));
- JumpTable* table = &jump_tables_.back();
- DCHECK(!table->GetLabel()->IsBound());
-
- bool use32bit = IsForced32Bit() || IsHighRegister(base_reg);
- uint32_t location = buffer_.Size();
- Fixup::Size size = use32bit ? Fixup::kLiteralAddr4KiB : Fixup::kLiteralAddr1KiB;
- FixupId fixup_id = AddFixup(Fixup::LoadLiteralAddress(location, base_reg, size));
- Emit16(static_cast<uint16_t>(table->GetLabel()->position_));
- table->GetLabel()->LinkTo(fixup_id);
- if (use32bit) {
- Emit16(0);
- }
- DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
-
- return table;
-}
-
-void Thumb2Assembler::EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) {
- CHECK(!IsForced32Bit()) << "Forced 32-bit dispatch not implemented yet";
- // 32-bit ADD doesn't support PC as an input, so we need a two-instruction sequence:
- // SUB ip, ip, #0
- // ADD pc, ip, reg
- // TODO: Implement.
-
- // The anchor's position needs to be fixed up before we can compute offsets - so make it a tracked
- // label.
- BindTrackedLabel(jump_table->GetAnchorLabel());
-
- add(PC, PC, ShifterOperand(displacement_reg));
-}
-
-} // namespace arm
-} // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
deleted file mode 100644
index 2ff9018..0000000
--- a/compiler/utils/arm/assembler_thumb2.h
+++ /dev/null
@@ -1,948 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_THUMB2_H_
-#define ART_COMPILER_UTILS_ARM_ASSEMBLER_THUMB2_H_
-
-#include <deque>
-#include <utility>
-#include <vector>
-
-#include "base/arena_containers.h"
-#include "base/array_ref.h"
-#include "base/logging.h"
-#include "constants_arm.h"
-#include "utils/arm/managed_register_arm.h"
-#include "utils/arm/assembler_arm.h"
-#include "offsets.h"
-
-namespace art {
-namespace arm {
-
-class Thumb2Assembler FINAL : public ArmAssembler {
- public:
- explicit Thumb2Assembler(ArenaAllocator* arena, bool can_relocate_branches = true)
- : ArmAssembler(arena),
- can_relocate_branches_(can_relocate_branches),
- force_32bit_(false),
- it_cond_index_(kNoItCondition),
- next_condition_(AL),
- fixups_(arena->Adapter(kArenaAllocAssembler)),
- fixup_dependents_(arena->Adapter(kArenaAllocAssembler)),
- literals_(arena->Adapter(kArenaAllocAssembler)),
- literal64_dedupe_map_(std::less<uint64_t>(), arena->Adapter(kArenaAllocAssembler)),
- jump_tables_(arena->Adapter(kArenaAllocAssembler)),
- last_position_adjustment_(0u),
- last_old_position_(0u),
- last_fixup_id_(0u) {
- cfi().DelayEmittingAdvancePCs();
- }
-
- virtual ~Thumb2Assembler() {
- }
-
- bool IsThumb() const OVERRIDE {
- return true;
- }
-
- bool IsForced32Bit() const {
- return force_32bit_;
- }
-
- bool CanRelocateBranches() const {
- return can_relocate_branches_;
- }
-
- void FinalizeCode() OVERRIDE;
-
- // Data-processing instructions.
- virtual void and_(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void eor(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void sub(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void rsb(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void add(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void adc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void sbc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void rsc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- void tst(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void teq(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void cmp(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- virtual void orr(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void orn(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void mov(Register rd, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void bic(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void mvn(Register rd, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- // Miscellaneous data-processing instructions.
- void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
- void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
- void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void rev(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void rev16(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void revsh(Register rd, Register rm, Condition cond = AL) OVERRIDE;
-
- // Multiply instructions.
- void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
- void mla(Register rd, Register rn, Register rm, Register ra,
- Condition cond = AL) OVERRIDE;
- void mls(Register rd, Register rn, Register rm, Register ra,
- Condition cond = AL) OVERRIDE;
- void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
- Condition cond = AL) OVERRIDE;
- void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
- Condition cond = AL) OVERRIDE;
-
- void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
- void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
-
- // Bit field extract instructions.
- void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
- void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
-
- // Load/store instructions.
- void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldrb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void strb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldrh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void strh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldrsb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void ldrsh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- // Load/store register dual instructions using registers `rd` and `rd` + 1.
- void ldrd(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void strd(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- // Load/store register dual instructions using registers `rd` and `rd2`.
- // Note that contrary to the ARM A1 encoding, the Thumb-2 T1 encoding
- // does not require `rd` to be even, nor `rd2` to be equal to `rd` + 1.
- void ldrd(Register rd, Register rd2, const Address& ad, Condition cond);
- void strd(Register rd, Register rd2, const Address& ad, Condition cond);
-
-
- void ldm(BlockAddressMode am, Register base,
- RegList regs, Condition cond = AL) OVERRIDE;
- void stm(BlockAddressMode am, Register base,
- RegList regs, Condition cond = AL) OVERRIDE;
-
- void ldrex(Register rd, Register rn, Condition cond = AL) OVERRIDE;
- void strex(Register rd, Register rt, Register rn, Condition cond = AL) OVERRIDE;
-
- void ldrex(Register rd, Register rn, uint16_t imm, Condition cond = AL);
- void strex(Register rd, Register rt, Register rn, uint16_t imm, Condition cond = AL);
-
- void ldrexd(Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
- void strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
-
- // Miscellaneous instructions.
- void clrex(Condition cond = AL) OVERRIDE;
- void nop(Condition cond = AL) OVERRIDE;
-
- void bkpt(uint16_t imm16) OVERRIDE;
- void svc(uint32_t imm24) OVERRIDE;
-
- // If-then
- void it(Condition firstcond, ItState i1 = kItOmitted,
- ItState i2 = kItOmitted, ItState i3 = kItOmitted) OVERRIDE;
-
- void cbz(Register rn, Label* target) OVERRIDE;
- void cbnz(Register rn, Label* target) OVERRIDE;
-
- // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
- void vmovsr(SRegister sn, Register rt, Condition cond = AL) OVERRIDE;
- void vmovrs(Register rt, SRegister sn, Condition cond = AL) OVERRIDE;
- void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL) OVERRIDE;
- void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL) OVERRIDE;
- void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmovs(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmovd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
-
- // Returns false if the immediate cannot be encoded.
- bool vmovs(SRegister sd, float s_imm, Condition cond = AL) OVERRIDE;
- bool vmovd(DRegister dd, double d_imm, Condition cond = AL) OVERRIDE;
-
- void vldrs(SRegister sd, const Address& ad, Condition cond = AL) OVERRIDE;
- void vstrs(SRegister sd, const Address& ad, Condition cond = AL) OVERRIDE;
- void vldrd(DRegister dd, const Address& ad, Condition cond = AL) OVERRIDE;
- void vstrd(DRegister dd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
-
- void vabss(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vabsd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vnegs(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vnegd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vsqrts(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
-
- void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcvtds(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtis(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtid(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtus(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtud(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
-
- void vcmps(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcmpd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcmpsz(SRegister sd, Condition cond = AL) OVERRIDE;
- void vcmpdz(DRegister dd, Condition cond = AL) OVERRIDE;
- void vmstat(Condition cond = AL) OVERRIDE; // VMRS APSR_nzcv, FPSCR
-
- void vcntd(DRegister dd, DRegister dm) OVERRIDE;
- void vpaddld(DRegister dd, DRegister dm, int32_t size, bool is_unsigned) OVERRIDE;
-
- void vpushs(SRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vpushd(DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vpops(SRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vpopd(DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vldmiad(Register base_reg, DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vstmiad(Register base_reg, DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
-
- // Branch instructions.
- void b(Label* label, Condition cond = AL);
- void bl(Label* label, Condition cond = AL);
- void blx(Label* label);
- void blx(Register rm, Condition cond = AL) OVERRIDE;
- void bx(Register rm, Condition cond = AL) OVERRIDE;
-
- // ADR instruction loading the register for branching to the label, including the Thumb mode bit.
- void AdrCode(Register rt, Label* label) OVERRIDE;
-
- virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Asr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Ror(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Rrx(Register rd, Register rm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void Lsl(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Lsr(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Asr(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Ror(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- void Push(Register rd, Condition cond = AL) OVERRIDE;
- void Pop(Register rd, Condition cond = AL) OVERRIDE;
-
- void PushList(RegList regs, Condition cond = AL) OVERRIDE;
- void PopList(RegList regs, Condition cond = AL) OVERRIDE;
- void StoreList(RegList regs, size_t stack_offset) OVERRIDE;
- void LoadList(RegList regs, size_t stack_offset) OVERRIDE;
-
- void Mov(Register rd, Register rm, Condition cond = AL) OVERRIDE;
-
- void CompareAndBranchIfZero(Register r, Label* label) OVERRIDE;
- void CompareAndBranchIfNonZero(Register r, Label* label) OVERRIDE;
-
- // Memory barriers.
- void dmb(DmbOptions flavor) OVERRIDE;
-
- // Get the final position of a label after local fixup based on the old position
- // recorded before FinalizeCode().
- uint32_t GetAdjustedPosition(uint32_t old_position) OVERRIDE;
-
- using ArmAssembler::NewLiteral; // Make the helper template visible.
-
- Literal* NewLiteral(size_t size, const uint8_t* data) OVERRIDE;
- void LoadLiteral(Register rt, Literal* literal) OVERRIDE;
- void LoadLiteral(Register rt, Register rt2, Literal* literal) OVERRIDE;
- void LoadLiteral(SRegister sd, Literal* literal) OVERRIDE;
- void LoadLiteral(DRegister dd, Literal* literal) OVERRIDE;
-
- // Add signed constant value to rd. May clobber IP.
- void AddConstant(Register rd, Register rn, int32_t value,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- void CmpConstant(Register rn, int32_t value, Condition cond = AL) OVERRIDE;
-
- // Load and Store. May clobber IP.
- void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadDImmediate(DRegister dd, double value, Condition cond = AL) OVERRIDE;
- void MarkExceptionHandler(Label* label) OVERRIDE;
- void LoadFromOffset(LoadOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void StoreToOffset(StoreOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void LoadSFromOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void StoreSToOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void LoadDFromOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void StoreDToOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
-
- bool ShifterOperandCanHold(Register rd,
- Register rn,
- Opcode opcode,
- uint32_t immediate,
- SetCc set_cc,
- ShifterOperand* shifter_op) OVERRIDE;
- using ArmAssembler::ShifterOperandCanHold; // Don't hide the non-virtual override.
-
- bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
-
-
- static bool IsInstructionForExceptionHandling(uintptr_t pc);
-
- // Emit data (e.g. encoded instruction or immediate) to the
- // instruction stream.
- void Emit32(int32_t value); // Emit a 32 bit instruction in thumb format.
- void Emit16(int16_t value); // Emit a 16 bit instruction in little endian format.
- void Bind(Label* label) OVERRIDE;
-
- // Force the assembler to generate 32 bit instructions.
- void Force32Bit() {
- force_32bit_ = true;
- }
-
- void Allow16Bit() {
- force_32bit_ = false;
- }
-
- // Emit an ADR (or a sequence of instructions) to load the jump table address into base_reg. This
- // will generate a fixup.
- JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE;
- // Emit an ADD PC, X to dispatch a jump-table jump. This will generate a fixup.
- void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE;
-
- private:
- typedef uint16_t FixupId;
-
- // Fixup: branches and literal pool references.
- //
- // The thumb2 architecture allows branches to be either 16 or 32 bit instructions. This
- // depends on both the type of branch and the offset to which it is branching. The 16-bit
- // cbz and cbnz instructions may also need to be replaced with a separate 16-bit compare
- // instruction and a 16- or 32-bit branch instruction. A load from the literal pool can
- // also be a 16-bit or 32-bit instruction and, if the method is large, we may need to use
- // a sequence of instructions to make up for the limited range of load literal instructions
- // (up to 4KiB for the 32-bit variant). When generating code for these insns we don't know
- // the size beforehand, so we assume it is the smallest available size and determine the final
- // code offsets and sizes and emit code in FinalizeCode().
- //
- // To handle this, we keep a record of every branch and literal pool load in the program.
- // The actual instruction encoding for these is delayed until we know the final size of
- // every instruction. When we bind a label to a branch we don't know the final location yet
- // as some preceding instructions may need to be expanded, so we record a non-final offset.
- // In FinalizeCode(), we expand the sizes of branches and literal loads that are out of
- // range. With each expansion, we need to update dependent Fixups, i.e. instructions with a
- // target on the other side of the expanded insn, as their offsets change and this may
- // trigger further expansion.
- //
- // All Fixups have a 'fixup id' which is a 16 bit unsigned number used to identify the
- // Fixup. For each unresolved label we keep a singly-linked list of all Fixups pointing
- // to it, using the fixup ids as links. The first link is stored in the label's position
- // (the label is linked but not bound); the following links are stored in the code buffer,
- // in the placeholder where we will eventually emit the actual code.
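- //
- // For example, a cbz whose target ends up beyond its 7-bit range grows from kCbxz16Bit
- // (2 bytes) to kCbxz32Bit (CMP + 16-bit Bcc, 4 bytes) in AdjustSizeIfNeeded(); the extra
- // 2 bytes shift everything after it, which may push other Fixups out of range, so
- // AdjustFixups() keeps revisiting dependents until all sizes are stable.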
-
- class Fixup {
- public:
- // Branch type.
- enum Type : uint8_t {
- kConditional, // B<cond>.
- kUnconditional, // B.
- kUnconditionalLink, // BL.
- kUnconditionalLinkX, // BLX.
- kCompareAndBranchXZero, // cbz/cbnz.
- kLoadCodeAddr, // Get address of a code label, used for Baker read barriers.
- kLoadLiteralNarrow,     // Load narrow integer literal.
- kLoadLiteralWide, // Load wide integer literal.
- kLoadLiteralAddr, // Load address of literal (used for jump table).
- kLoadFPLiteralSingle, // Load FP literal single.
- kLoadFPLiteralDouble, // Load FP literal double.
- };
-
- // Calculated size of branch instruction based on type and offset.
- enum Size : uint8_t {
- // Branch variants.
- kBranch16Bit,
- kBranch32Bit,
- // NOTE: We don't support branches which would require multiple instructions, i.e.
- // conditional branches beyond +-1MiB and unconditional branches beyond +-16MiB.
-
- // CBZ/CBNZ variants.
- kCbxz16Bit, // CBZ/CBNZ rX, label; X < 8; 7-bit positive offset.
- kCbxz32Bit, // CMP rX, #0 + Bcc label; X < 8; 16-bit Bcc; +-8-bit offset.
- kCbxz48Bit, // CMP rX, #0 + Bcc label; X < 8; 32-bit Bcc; up to +-1MiB offset.
-
- // ADR variants.
- kCodeAddr4KiB, // ADR rX, <label>; label must be after the ADR but within 4KiB range.
- // Multi-instruction expansion is not supported.
-
- // Load integer literal variants.
- // LDR rX, label; X < 8; 16-bit variant up to 1KiB offset; 2 bytes.
- kLiteral1KiB,
- // LDR rX, label; 32-bit variant up to 4KiB offset; 4 bytes.
- kLiteral4KiB,
- // MOV rX, imm16 + ADD rX, pc + LDR rX, [rX]; X < 8; up to 64KiB offset; 8 bytes.
- kLiteral64KiB,
- // MOV rX, modimm + ADD rX, pc + LDR rX, [rX, #imm12]; up to 1MiB offset; 10 bytes.
- kLiteral1MiB,
- // NOTE: We don't provide the 12-byte version of kLiteralFar below where the LDR is 16-bit.
- // MOV rX, imm16 + MOVT rX, imm16 + ADD rX, pc + LDR rX, [rX]; any offset; 14 bytes.
- kLiteralFar,
-
- // Load literal base addr.
- // ADR rX, label; X < 8; 8 bit immediate, shifted to 10 bit. 2 bytes.
- kLiteralAddr1KiB,
- // ADR rX, label; 4KiB offset. 4 bytes.
- kLiteralAddr4KiB,
- // MOV rX, imm16 + ADD rX, pc; 64KiB offset. 6 bytes.
- kLiteralAddr64KiB,
- // MOV rX, imm16 + MOVT rX, imm16 + ADD rX, pc; any offset; 10 bytes.
- kLiteralAddrFar,
-
- // Load long or FP literal variants.
- // VLDR s/dX, label; 32-bit insn, up to 1KiB offset; 4 bytes.
- kLongOrFPLiteral1KiB,
- // MOV ip, imm16 + ADD ip, pc + VLDR s/dX, [IP, #0]; up to 64KiB offset; 10 bytes.
- kLongOrFPLiteral64KiB,
- // MOV ip, imm16 + MOVT ip, imm16 + ADD ip, pc + VLDR s/dX, [IP]; any offset; 14 bytes.
- kLongOrFPLiteralFar,
- };
-
- // Unresolved branch possibly with a condition.
- static Fixup Branch(uint32_t location, Type type, Size size = kBranch16Bit,
- Condition cond = AL) {
- DCHECK(type == kConditional || type == kUnconditional ||
- type == kUnconditionalLink || type == kUnconditionalLinkX);
- DCHECK(size == kBranch16Bit || size == kBranch32Bit);
- DCHECK(size == kBranch32Bit || (type == kConditional || type == kUnconditional));
- return Fixup(kNoRegister, kNoRegister, kNoSRegister, kNoDRegister,
- cond, type, size, location);
- }
-
- // Unresolved compare-and-branch instruction with a register and condition (EQ or NE).
- static Fixup CompareAndBranch(uint32_t location, Register rn, Condition cond) {
- DCHECK(cond == EQ || cond == NE);
- return Fixup(rn, kNoRegister, kNoSRegister, kNoDRegister,
- cond, kCompareAndBranchXZero, kCbxz16Bit, location);
- }
-
- // Code address.
- static Fixup LoadCodeAddress(uint32_t location, Register rt) {
- return Fixup(rt, kNoRegister, kNoSRegister, kNoDRegister,
- AL, kLoadCodeAddr, kCodeAddr4KiB, location);
- }
-
- // Load narrow literal.
- static Fixup LoadNarrowLiteral(uint32_t location, Register rt, Size size) {
- DCHECK(size == kLiteral1KiB || size == kLiteral4KiB || size == kLiteral64KiB ||
- size == kLiteral1MiB || size == kLiteralFar);
- DCHECK(!IsHighRegister(rt) || (size != kLiteral1KiB && size != kLiteral64KiB));
- return Fixup(rt, kNoRegister, kNoSRegister, kNoDRegister,
- AL, kLoadLiteralNarrow, size, location);
- }
-
- // Load wide literal.
- static Fixup LoadWideLiteral(uint32_t location, Register rt, Register rt2,
- Size size = kLongOrFPLiteral1KiB) {
- DCHECK(size == kLongOrFPLiteral1KiB || size == kLongOrFPLiteral64KiB ||
- size == kLongOrFPLiteralFar);
- DCHECK(!IsHighRegister(rt) || (size != kLiteral1KiB && size != kLiteral64KiB));
- return Fixup(rt, rt2, kNoSRegister, kNoDRegister,
- AL, kLoadLiteralWide, size, location);
- }
-
- // Load FP single literal.
- static Fixup LoadSingleLiteral(uint32_t location, SRegister sd,
- Size size = kLongOrFPLiteral1KiB) {
- DCHECK(size == kLongOrFPLiteral1KiB || size == kLongOrFPLiteral64KiB ||
- size == kLongOrFPLiteralFar);
- return Fixup(kNoRegister, kNoRegister, sd, kNoDRegister,
- AL, kLoadFPLiteralSingle, size, location);
- }
-
- // Load FP double literal.
- static Fixup LoadDoubleLiteral(uint32_t location, DRegister dd,
- Size size = kLongOrFPLiteral1KiB) {
- DCHECK(size == kLongOrFPLiteral1KiB || size == kLongOrFPLiteral64KiB ||
- size == kLongOrFPLiteralFar);
- return Fixup(kNoRegister, kNoRegister, kNoSRegister, dd,
- AL, kLoadFPLiteralDouble, size, location);
- }
-
- static Fixup LoadLiteralAddress(uint32_t location, Register rt, Size size) {
- DCHECK(size == kLiteralAddr1KiB || size == kLiteralAddr4KiB || size == kLiteralAddr64KiB ||
- size == kLiteralAddrFar);
- DCHECK(!IsHighRegister(rt) || size != kLiteralAddr1KiB);
- return Fixup(rt, kNoRegister, kNoSRegister, kNoDRegister,
- AL, kLoadLiteralAddr, size, location);
- }
-
- Type GetType() const {
- return type_;
- }
-
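- // Relies on all literal-load types being declared after the branch and code-address
- // types in the Type enum above.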
- bool IsLoadLiteral() const {
- return GetType() >= kLoadLiteralNarrow;
- }
-
- // Returns whether the Fixup can expand from the original size.
- bool CanExpand() const {
- switch (GetOriginalSize()) {
- case kBranch32Bit:
- case kCbxz48Bit:
- case kCodeAddr4KiB:
- case kLiteralFar:
- case kLiteralAddrFar:
- case kLongOrFPLiteralFar:
- return false;
- default:
- return true;
- }
- }
-
- Size GetOriginalSize() const {
- return original_size_;
- }
-
- Size GetSize() const {
- return size_;
- }
-
- uint32_t GetOriginalSizeInBytes() const;
-
- uint32_t GetSizeInBytes() const;
-
- uint32_t GetLocation() const {
- return location_;
- }
-
- uint32_t GetTarget() const {
- return target_;
- }
-
- uint32_t GetAdjustment() const {
- return adjustment_;
- }
-
- // Prepare the assembler->fixup_dependents_ and each Fixup's dependents_start_/count_.
- static void PrepareDependents(Thumb2Assembler* assembler);
-
- ArrayRef<const FixupId> Dependents(const Thumb2Assembler& assembler) const {
- return ArrayRef<const FixupId>(assembler.fixup_dependents_).SubArray(dependents_start_,
- dependents_count_);
- }
-
- // Resolve a branch when the target is known.
- void Resolve(uint32_t target) {
- DCHECK_EQ(target_, kUnresolved);
- DCHECK_NE(target, kUnresolved);
- target_ = target;
- }
-
- // Branches with bound targets that are in range can be emitted early.
- // However, the caller still needs to check if the branch doesn't go over
- // another Fixup that's not ready to be emitted.
- bool IsCandidateForEmitEarly() const;
-
- // Check if the current size is OK for current location_, target_ and adjustment_.
- // If not, increase the size. Return the size increase, 0 if unchanged.
- // If the target is after this Fixup, also add the difference to adjustment_,
- // so that we don't need to consider forward Fixups as their own dependencies.
- uint32_t AdjustSizeIfNeeded(uint32_t current_code_size);
-
- // Increase adjustments. This is called for dependents of a Fixup when its size changes.
- void IncreaseAdjustment(uint32_t increase) {
- adjustment_ += increase;
- }
-
- // Finalize the branch with an adjustment to the location. Both location and target are updated.
- void Finalize(uint32_t location_adjustment) {
- DCHECK_NE(target_, kUnresolved);
- location_ += location_adjustment;
- target_ += location_adjustment;
- }
-
- // Emit the branch instruction into the assembler buffer. This does the
- // encoding into the thumb instruction.
- void Emit(uint32_t emit_location, AssemblerBuffer* buffer, uint32_t code_size) const;
-
- private:
- Fixup(Register rn, Register rt2, SRegister sd, DRegister dd,
- Condition cond, Type type, Size size, uint32_t location)
- : rn_(rn),
- rt2_(rt2),
- sd_(sd),
- dd_(dd),
- cond_(cond),
- type_(type),
- original_size_(size), size_(size),
- location_(location),
- target_(kUnresolved),
- adjustment_(0u),
- dependents_count_(0u),
- dependents_start_(0u) {
- }
-
- static size_t SizeInBytes(Size size);
-
- // The size of padding added before the literal pool.
- static size_t LiteralPoolPaddingSize(uint32_t current_code_size);
-
- // Returns the offset from the PC-using insn to the target.
- int32_t GetOffset(uint32_t current_code_size) const;
-
- size_t IncreaseSize(Size new_size);
-
- int32_t LoadWideOrFpEncoding(Register rbase, int32_t offset) const;
-
- template <typename Function>
- static void ForExpandableDependencies(Thumb2Assembler* assembler, Function fn);
-
- static constexpr uint32_t kUnresolved = 0xffffffff; // Value for target_ for unresolved.
-
- const Register rn_; // Rn for cbnz/cbz, Rt for literal loads.
- Register rt2_; // For kLoadLiteralWide.
- SRegister sd_; // For kLoadFPLiteralSingle.
- DRegister dd_; // For kLoadFPLiteralDouble.
- const Condition cond_;
- const Type type_;
- Size original_size_;
- Size size_;
- uint32_t location_; // Offset into assembler buffer in bytes.
- uint32_t target_; // Offset into assembler buffer in bytes.
- uint32_t adjustment_; // The number of extra bytes inserted between location_ and target_.
- // Fixups that require adjustment when the current size changes are stored in a single
- // array in the assembler and we store only the start index and count here.
- uint32_t dependents_count_;
- uint32_t dependents_start_;
- };
-
- // Emit a single 32 or 16 bit data processing instruction.
- void EmitDataProcessing(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
-
- // Emit a single 32 bit miscellaneous instruction.
- void Emit32Miscellaneous(uint8_t op1,
- uint8_t op2,
- uint32_t rest_encoding);
-
- // Emit reverse byte instructions: rev, rev16, revsh.
- void EmitReverseBytes(Register rd, Register rm, uint32_t op);
-
- // Emit a single 16 bit miscellaneous instruction.
- void Emit16Miscellaneous(uint32_t rest_encoding);
-
- // Must the instruction be 32 bits or can it possibly be encoded
- // in 16 bits?
- bool Is32BitDataProcessing(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
-
- // Emit a 32 bit data processing instruction.
- void Emit32BitDataProcessing(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
-
- // Emit a 16 bit data processing instruction.
- void Emit16BitDataProcessing(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
-
- void Emit16BitAddSub(Condition cond,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
-
- uint16_t EmitCompareAndBranch(Register rn, uint16_t prev, bool n);
-
- void EmitLoadStore(Condition cond,
- bool load,
- bool byte,
- bool half,
- bool is_signed,
- Register rd,
- const Address& ad);
-
- void EmitMemOpAddressMode3(Condition cond,
- int32_t mode,
- Register rd,
- const Address& ad);
-
- void EmitMultiMemOp(Condition cond,
- BlockAddressMode am,
- bool load,
- Register base,
- RegList regs);
-
- void EmitMulOp(Condition cond,
- int32_t opcode,
- Register rd,
- Register rn,
- Register rm,
- Register rs);
-
- void EmitVFPsss(Condition cond,
- int32_t opcode,
- SRegister sd,
- SRegister sn,
- SRegister sm);
-
- void EmitVLdmOrStm(int32_t rest,
- uint32_t reg,
- int nregs,
- Register rn,
- bool is_load,
- bool dbl,
- Condition cond);
-
- void EmitVFPddd(Condition cond,
- int32_t opcode,
- DRegister dd,
- DRegister dn,
- DRegister dm);
-
- void EmitVFPsd(Condition cond,
- int32_t opcode,
- SRegister sd,
- DRegister dm);
-
- void EmitVFPds(Condition cond,
- int32_t opcode,
- DRegister dd,
- SRegister sm);
-
- void EmitVPushPop(uint32_t reg, int nregs, bool push, bool dbl, Condition cond);
-
- void EmitBranch(Condition cond, Label* label, bool link, bool x);
- static int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
- static int DecodeBranchOffset(int32_t inst);
- void EmitShift(Register rd, Register rm, Shift shift, uint8_t amount,
- Condition cond = AL, SetCc set_cc = kCcDontCare);
- void EmitShift(Register rd, Register rn, Shift shift, Register rm,
- Condition cond = AL, SetCc set_cc = kCcDontCare);
-
- static int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
- static int32_t GetAllowedStoreOffsetBits(StoreOperandType type);
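- // Helpers for offsets the encoding cannot hold directly: split `offset` into a part added
- // to the base register (via `temp`) and a remainder that fits in `allowed_offset_bits`;
- // see the use in StoreToOffset().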
- bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
- int32_t offset,
- /*out*/ int32_t* add_to_base,
- /*out*/ int32_t* offset_for_load_store);
- int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
- Register temp,
- Register base,
- int32_t offset,
- Condition cond);
-
- // Whether the assembler can relocate branches. If false, unresolved branches will be
- // emitted as 32-bit instructions.
- bool can_relocate_branches_;
-
- // Force the assembler to use 32 bit thumb2 instructions.
- bool force_32bit_;
-
- // IfThen conditions. Used to check that conditional instructions match the preceding IT.
- Condition it_conditions_[4];
- uint8_t it_cond_index_;
- Condition next_condition_;
-
- void SetItCondition(ItState s, Condition cond, uint8_t index);
-
- void CheckCondition(Condition cond) {
- CHECK_EQ(cond, next_condition_);
-
- // Move to the next condition if there is one.
- if (it_cond_index_ < 3) {
- ++it_cond_index_;
- next_condition_ = it_conditions_[it_cond_index_];
- } else {
- next_condition_ = AL;
- }
- }
-
- void CheckConditionLastIt(Condition cond) {
- if (it_cond_index_ < 3) {
- // Check that the next condition is AL. This means that the
- // current condition is the last in the IT block.
- CHECK_EQ(it_conditions_[it_cond_index_ + 1], AL);
- }
- CheckCondition(cond);
- }
-
- FixupId AddFixup(Fixup fixup) {
- FixupId fixup_id = static_cast<FixupId>(fixups_.size());
- fixups_.push_back(fixup);
- // For iterating using FixupId, we need the next id to be representable.
- DCHECK_EQ(static_cast<size_t>(static_cast<FixupId>(fixups_.size())), fixups_.size());
- return fixup_id;
- }
-
- Fixup* GetFixup(FixupId fixup_id) {
- DCHECK_LT(fixup_id, fixups_.size());
- return &fixups_[fixup_id];
- }
-
- void BindLabel(Label* label, uint32_t bound_pc);
- uint32_t BindLiterals();
- void BindJumpTables(uint32_t code_size);
- void AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_size,
- std::deque<FixupId>* fixups_to_recalculate);
- uint32_t AdjustFixups();
- void EmitFixups(uint32_t adjusted_code_size);
- void EmitLiterals();
- void EmitJumpTables();
- void PatchCFI();
-
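- // Raw Thumb-2 instruction encoders used by the Fixup, literal and jump table emission above.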
- static int16_t BEncoding16(int32_t offset, Condition cond);
- static int32_t BEncoding32(int32_t offset, Condition cond);
- static int16_t CbxzEncoding16(Register rn, int32_t offset, Condition cond);
- static int16_t CmpRnImm8Encoding16(Register rn, int32_t value);
- static int16_t AddRdnRmEncoding16(Register rdn, Register rm);
- static int32_t MovwEncoding32(Register rd, int32_t value);
- static int32_t MovtEncoding32(Register rd, int32_t value);
- static int32_t MovModImmEncoding32(Register rd, int32_t value);
- static int16_t LdrLitEncoding16(Register rt, int32_t offset);
- static int32_t LdrLitEncoding32(Register rt, int32_t offset);
- static int32_t LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset);
- static int32_t VldrsEncoding32(SRegister sd, Register rn, int32_t offset);
- static int32_t VldrdEncoding32(DRegister dd, Register rn, int32_t offset);
- static int16_t LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset);
- static int32_t LdrRtRnImm12Encoding(Register rt, Register rn, int32_t offset);
- static int16_t AdrEncoding16(Register rd, int32_t offset);
- static int32_t AdrEncoding32(Register rd, int32_t offset);
-
- ArenaVector<Fixup> fixups_;
- ArenaVector<FixupId> fixup_dependents_;
-
- // Use std::deque<> for literal labels to allow insertions at the end
- // without invalidating pointers and references to existing elements.
- ArenaDeque<Literal> literals_;
-
- // Deduplication map for 64-bit literals, used for LoadDImmediate().
- ArenaSafeMap<uint64_t, Literal*> literal64_dedupe_map_;
-
- // Jump table list.
- ArenaDeque<JumpTable> jump_tables_;
-
- // Data for AdjustedPosition(), see the description there.
- uint32_t last_position_adjustment_;
- uint32_t last_old_position_;
- FixupId last_fixup_id_;
-};
-
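-// Scoped helper: optionally forces 32-bit encodings and restores the previous setting on
-// destruction.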
-class ScopedForce32Bit {
- public:
- explicit ScopedForce32Bit(Thumb2Assembler* assembler, bool force = true)
- : assembler_(assembler), old_force_32bit_(assembler->IsForced32Bit()) {
- if (force) {
- assembler->Force32Bit();
- }
- }
-
- ~ScopedForce32Bit() {
- if (!old_force_32bit_) {
- assembler_->Allow16Bit();
- }
- }
-
- private:
- Thumb2Assembler* const assembler_;
- const bool old_force_32bit_;
-};
-
-} // namespace arm
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_THUMB2_H_
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
deleted file mode 100644
index 0147a76..0000000
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ /dev/null
@@ -1,1666 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_thumb2.h"
-
-#include "android-base/stringprintf.h"
-
-#include "base/stl_util.h"
-#include "utils/assembler_test.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-class AssemblerThumb2Test : public AssemblerTest<arm::Thumb2Assembler,
- arm::Register, arm::SRegister,
- uint32_t> {
- protected:
- std::string GetArchitectureString() OVERRIDE {
- return "arm";
- }
-
- std::string GetAssemblerParameters() OVERRIDE {
- return " -march=armv7-a -mcpu=cortex-a15 -mfpu=neon -mthumb";
- }
-
- const char* GetAssemblyHeader() OVERRIDE {
- return kThumb2AssemblyHeader;
- }
-
- std::string GetDisassembleParameters() OVERRIDE {
- return " -D -bbinary -marm --disassembler-options=force-thumb --no-show-raw-insn";
- }
-
- void SetUpHelpers() OVERRIDE {
- if (registers_.size() == 0) {
- registers_.insert(end(registers_),
- { // NOLINT(whitespace/braces)
- new arm::Register(arm::R0),
- new arm::Register(arm::R1),
- new arm::Register(arm::R2),
- new arm::Register(arm::R3),
- new arm::Register(arm::R4),
- new arm::Register(arm::R5),
- new arm::Register(arm::R6),
- new arm::Register(arm::R7),
- new arm::Register(arm::R8),
- new arm::Register(arm::R9),
- new arm::Register(arm::R10),
- new arm::Register(arm::R11),
- new arm::Register(arm::R12),
- new arm::Register(arm::R13),
- new arm::Register(arm::R14),
- new arm::Register(arm::R15)
- });
- }
- }
-
- void TearDown() OVERRIDE {
- AssemblerTest::TearDown();
- STLDeleteElements(®isters_);
- }
-
- std::vector<arm::Register*> GetRegisters() OVERRIDE {
- return registers_;
- }
-
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
- return imm_value;
- }
-
- std::string RepeatInsn(size_t count, const std::string& insn) {
- std::string result;
- for (; count != 0u; --count) {
- result += insn;
- }
- return result;
- }
-
- private:
- std::vector<arm::Register*> registers_;
-
- static constexpr const char* kThumb2AssemblyHeader = ".syntax unified\n.thumb\n";
-};
-
-TEST_F(AssemblerThumb2Test, Toolchain) {
- EXPECT_TRUE(CheckTools());
-}
-
-#define __ GetAssembler()->
-
-TEST_F(AssemblerThumb2Test, Sbfx) {
- __ sbfx(arm::R0, arm::R1, 0, 1);
- __ sbfx(arm::R0, arm::R1, 0, 8);
- __ sbfx(arm::R0, arm::R1, 0, 16);
- __ sbfx(arm::R0, arm::R1, 0, 32);
-
- __ sbfx(arm::R0, arm::R1, 8, 1);
- __ sbfx(arm::R0, arm::R1, 8, 8);
- __ sbfx(arm::R0, arm::R1, 8, 16);
- __ sbfx(arm::R0, arm::R1, 8, 24);
-
- __ sbfx(arm::R0, arm::R1, 16, 1);
- __ sbfx(arm::R0, arm::R1, 16, 8);
- __ sbfx(arm::R0, arm::R1, 16, 16);
-
- __ sbfx(arm::R0, arm::R1, 31, 1);
-
- const char* expected =
- "sbfx r0, r1, #0, #1\n"
- "sbfx r0, r1, #0, #8\n"
- "sbfx r0, r1, #0, #16\n"
- "sbfx r0, r1, #0, #32\n"
-
- "sbfx r0, r1, #8, #1\n"
- "sbfx r0, r1, #8, #8\n"
- "sbfx r0, r1, #8, #16\n"
- "sbfx r0, r1, #8, #24\n"
-
- "sbfx r0, r1, #16, #1\n"
- "sbfx r0, r1, #16, #8\n"
- "sbfx r0, r1, #16, #16\n"
-
- "sbfx r0, r1, #31, #1\n";
- DriverStr(expected, "sbfx");
-}
-
-TEST_F(AssemblerThumb2Test, Ubfx) {
- __ ubfx(arm::R0, arm::R1, 0, 1);
- __ ubfx(arm::R0, arm::R1, 0, 8);
- __ ubfx(arm::R0, arm::R1, 0, 16);
- __ ubfx(arm::R0, arm::R1, 0, 32);
-
- __ ubfx(arm::R0, arm::R1, 8, 1);
- __ ubfx(arm::R0, arm::R1, 8, 8);
- __ ubfx(arm::R0, arm::R1, 8, 16);
- __ ubfx(arm::R0, arm::R1, 8, 24);
-
- __ ubfx(arm::R0, arm::R1, 16, 1);
- __ ubfx(arm::R0, arm::R1, 16, 8);
- __ ubfx(arm::R0, arm::R1, 16, 16);
-
- __ ubfx(arm::R0, arm::R1, 31, 1);
-
- const char* expected =
- "ubfx r0, r1, #0, #1\n"
- "ubfx r0, r1, #0, #8\n"
- "ubfx r0, r1, #0, #16\n"
- "ubfx r0, r1, #0, #32\n"
-
- "ubfx r0, r1, #8, #1\n"
- "ubfx r0, r1, #8, #8\n"
- "ubfx r0, r1, #8, #16\n"
- "ubfx r0, r1, #8, #24\n"
-
- "ubfx r0, r1, #16, #1\n"
- "ubfx r0, r1, #16, #8\n"
- "ubfx r0, r1, #16, #16\n"
-
- "ubfx r0, r1, #31, #1\n";
- DriverStr(expected, "ubfx");
-}
-
-TEST_F(AssemblerThumb2Test, Vmstat) {
- __ vmstat();
-
- const char* expected = "vmrs APSR_nzcv, FPSCR\n";
-
- DriverStr(expected, "vmrs");
-}
-
-TEST_F(AssemblerThumb2Test, ldrexd) {
- __ ldrexd(arm::R0, arm::R1, arm::R0);
- __ ldrexd(arm::R0, arm::R1, arm::R1);
- __ ldrexd(arm::R0, arm::R1, arm::R2);
- __ ldrexd(arm::R5, arm::R3, arm::R7);
-
- const char* expected =
- "ldrexd r0, r1, [r0]\n"
- "ldrexd r0, r1, [r1]\n"
- "ldrexd r0, r1, [r2]\n"
- "ldrexd r5, r3, [r7]\n";
- DriverStr(expected, "ldrexd");
-}
-
-TEST_F(AssemblerThumb2Test, strexd) {
- __ strexd(arm::R9, arm::R0, arm::R1, arm::R0);
- __ strexd(arm::R9, arm::R0, arm::R1, arm::R1);
- __ strexd(arm::R9, arm::R0, arm::R1, arm::R2);
- __ strexd(arm::R9, arm::R5, arm::R3, arm::R7);
-
- const char* expected =
- "strexd r9, r0, r1, [r0]\n"
- "strexd r9, r0, r1, [r1]\n"
- "strexd r9, r0, r1, [r2]\n"
- "strexd r9, r5, r3, [r7]\n";
- DriverStr(expected, "strexd");
-}
-
-TEST_F(AssemblerThumb2Test, clrex) {
- __ clrex();
-
- const char* expected = "clrex\n";
- DriverStr(expected, "clrex");
-}
-
-TEST_F(AssemblerThumb2Test, LdrdStrd) {
- __ ldrd(arm::R0, arm::Address(arm::R2, 8));
- __ ldrd(arm::R0, arm::Address(arm::R12));
- __ strd(arm::R0, arm::Address(arm::R2, 8));
-
- const char* expected =
- "ldrd r0, r1, [r2, #8]\n"
- "ldrd r0, r1, [r12]\n"
- "strd r0, r1, [r2, #8]\n";
- DriverStr(expected, "ldrdstrd");
-}
-
-TEST_F(AssemblerThumb2Test, eor) {
- __ eor(arm::R1, arm::R1, arm::ShifterOperand(arm::R0));
- __ eor(arm::R1, arm::R0, arm::ShifterOperand(arm::R1));
- __ eor(arm::R1, arm::R8, arm::ShifterOperand(arm::R0));
- __ eor(arm::R8, arm::R1, arm::ShifterOperand(arm::R0));
- __ eor(arm::R1, arm::R0, arm::ShifterOperand(arm::R8));
-
- const char* expected =
- "eors r1, r0\n"
- "eor r1, r0, r1\n"
- "eor r1, r8, r0\n"
- "eor r8, r1, r0\n"
- "eor r1, r0, r8\n";
- DriverStr(expected, "eor");
-}
-
-TEST_F(AssemblerThumb2Test, sub) {
- __ subs(arm::R1, arm::R0, arm::ShifterOperand(42));
- __ sub(arm::R1, arm::R0, arm::ShifterOperand(42));
- __ subs(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
- __ sub(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
-
- const char* expected =
- "subs r1, r0, #42\n"
- "sub.w r1, r0, #42\n"
- "subs r1, r0, r2, asr #31\n"
- "sub r1, r0, r2, asr #31\n";
- DriverStr(expected, "sub");
-}
-
-TEST_F(AssemblerThumb2Test, add) {
- __ adds(arm::R1, arm::R0, arm::ShifterOperand(42));
- __ add(arm::R1, arm::R0, arm::ShifterOperand(42));
- __ adds(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
- __ add(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
-
- const char* expected =
- "adds r1, r0, #42\n"
- "add.w r1, r0, #42\n"
- "adds r1, r0, r2, asr #31\n"
- "add r1, r0, r2, asr #31\n";
- DriverStr(expected, "add");
-}
-
-TEST_F(AssemblerThumb2Test, umull) {
- __ umull(arm::R0, arm::R1, arm::R2, arm::R3);
-
- const char* expected =
- "umull r0, r1, r2, r3\n";
- DriverStr(expected, "umull");
-}
-
-TEST_F(AssemblerThumb2Test, smull) {
- __ smull(arm::R0, arm::R1, arm::R2, arm::R3);
-
- const char* expected =
- "smull r0, r1, r2, r3\n";
- DriverStr(expected, "smull");
-}
-
-TEST_F(AssemblerThumb2Test, LoadByteFromThumbOffset) {
- arm::LoadOperandType type = arm::kLoadUnsignedByte;
-
- __ LoadFromOffset(type, arm::R0, arm::R7, 0);
- __ LoadFromOffset(type, arm::R1, arm::R7, 31);
- __ LoadFromOffset(type, arm::R2, arm::R7, 32);
- __ LoadFromOffset(type, arm::R3, arm::R7, 4095);
- __ LoadFromOffset(type, arm::R4, arm::SP, 0);
-
- const char* expected =
- "ldrb r0, [r7, #0]\n"
- "ldrb r1, [r7, #31]\n"
- "ldrb.w r2, [r7, #32]\n"
- "ldrb.w r3, [r7, #4095]\n"
- "ldrb.w r4, [sp, #0]\n";
- DriverStr(expected, "LoadByteFromThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, StoreByteToThumbOffset) {
- arm::StoreOperandType type = arm::kStoreByte;
-
- __ StoreToOffset(type, arm::R0, arm::R7, 0);
- __ StoreToOffset(type, arm::R1, arm::R7, 31);
- __ StoreToOffset(type, arm::R2, arm::R7, 32);
- __ StoreToOffset(type, arm::R3, arm::R7, 4095);
- __ StoreToOffset(type, arm::R4, arm::SP, 0);
-
- const char* expected =
- "strb r0, [r7, #0]\n"
- "strb r1, [r7, #31]\n"
- "strb.w r2, [r7, #32]\n"
- "strb.w r3, [r7, #4095]\n"
- "strb.w r4, [sp, #0]\n";
- DriverStr(expected, "StoreByteToThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, LoadHalfFromThumbOffset) {
- arm::LoadOperandType type = arm::kLoadUnsignedHalfword;
-
- __ LoadFromOffset(type, arm::R0, arm::R7, 0);
- __ LoadFromOffset(type, arm::R1, arm::R7, 62);
- __ LoadFromOffset(type, arm::R2, arm::R7, 64);
- __ LoadFromOffset(type, arm::R3, arm::R7, 4094);
- __ LoadFromOffset(type, arm::R4, arm::SP, 0);
- __ LoadFromOffset(type, arm::R5, arm::R7, 1); // Unaligned
-
- const char* expected =
- "ldrh r0, [r7, #0]\n"
- "ldrh r1, [r7, #62]\n"
- "ldrh.w r2, [r7, #64]\n"
- "ldrh.w r3, [r7, #4094]\n"
- "ldrh.w r4, [sp, #0]\n"
- "ldrh.w r5, [r7, #1]\n";
- DriverStr(expected, "LoadHalfFromThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, StoreHalfToThumbOffset) {
- arm::StoreOperandType type = arm::kStoreHalfword;
-
- __ StoreToOffset(type, arm::R0, arm::R7, 0);
- __ StoreToOffset(type, arm::R1, arm::R7, 62);
- __ StoreToOffset(type, arm::R2, arm::R7, 64);
- __ StoreToOffset(type, arm::R3, arm::R7, 4094);
- __ StoreToOffset(type, arm::R4, arm::SP, 0);
- __ StoreToOffset(type, arm::R5, arm::R7, 1); // Unaligned
-
- const char* expected =
- "strh r0, [r7, #0]\n"
- "strh r1, [r7, #62]\n"
- "strh.w r2, [r7, #64]\n"
- "strh.w r3, [r7, #4094]\n"
- "strh.w r4, [sp, #0]\n"
- "strh.w r5, [r7, #1]\n";
- DriverStr(expected, "StoreHalfToThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, LoadWordFromSpPlusOffset) {
- arm::LoadOperandType type = arm::kLoadWord;
-
- __ LoadFromOffset(type, arm::R0, arm::SP, 0);
- __ LoadFromOffset(type, arm::R1, arm::SP, 124);
- __ LoadFromOffset(type, arm::R2, arm::SP, 128);
- __ LoadFromOffset(type, arm::R3, arm::SP, 1020);
- __ LoadFromOffset(type, arm::R4, arm::SP, 1024);
- __ LoadFromOffset(type, arm::R5, arm::SP, 4092);
- __ LoadFromOffset(type, arm::R6, arm::SP, 1); // Unaligned
-
- const char* expected =
- "ldr r0, [sp, #0]\n"
- "ldr r1, [sp, #124]\n"
- "ldr r2, [sp, #128]\n"
- "ldr r3, [sp, #1020]\n"
- "ldr.w r4, [sp, #1024]\n"
- "ldr.w r5, [sp, #4092]\n"
- "ldr.w r6, [sp, #1]\n";
- DriverStr(expected, "LoadWordFromSpPlusOffset");
-}
-
-TEST_F(AssemblerThumb2Test, StoreWordToSpPlusOffset) {
- arm::StoreOperandType type = arm::kStoreWord;
-
- __ StoreToOffset(type, arm::R0, arm::SP, 0);
- __ StoreToOffset(type, arm::R1, arm::SP, 124);
- __ StoreToOffset(type, arm::R2, arm::SP, 128);
- __ StoreToOffset(type, arm::R3, arm::SP, 1020);
- __ StoreToOffset(type, arm::R4, arm::SP, 1024);
- __ StoreToOffset(type, arm::R5, arm::SP, 4092);
- __ StoreToOffset(type, arm::R6, arm::SP, 1); // Unaligned
-
- const char* expected =
- "str r0, [sp, #0]\n"
- "str r1, [sp, #124]\n"
- "str r2, [sp, #128]\n"
- "str r3, [sp, #1020]\n"
- "str.w r4, [sp, #1024]\n"
- "str.w r5, [sp, #4092]\n"
- "str.w r6, [sp, #1]\n";
- DriverStr(expected, "StoreWordToSpPlusOffset");
-}
-
-TEST_F(AssemblerThumb2Test, LoadWordFromPcPlusOffset) {
- arm::LoadOperandType type = arm::kLoadWord;
-
- __ LoadFromOffset(type, arm::R0, arm::PC, 0);
- __ LoadFromOffset(type, arm::R1, arm::PC, 124);
- __ LoadFromOffset(type, arm::R2, arm::PC, 128);
- __ LoadFromOffset(type, arm::R3, arm::PC, 1020);
- __ LoadFromOffset(type, arm::R4, arm::PC, 1024);
- __ LoadFromOffset(type, arm::R5, arm::PC, 4092);
- __ LoadFromOffset(type, arm::R6, arm::PC, 1); // Unaligned
-
- const char* expected =
- "ldr r0, [pc, #0]\n"
- "ldr r1, [pc, #124]\n"
- "ldr r2, [pc, #128]\n"
- "ldr r3, [pc, #1020]\n"
- "ldr.w r4, [pc, #1024]\n"
- "ldr.w r5, [pc, #4092]\n"
- "ldr.w r6, [pc, #1]\n";
- DriverStr(expected, "LoadWordFromPcPlusOffset");
-}
-
-TEST_F(AssemblerThumb2Test, StoreWordToThumbOffset) {
- arm::StoreOperandType type = arm::kStoreWord;
- int32_t offset = 4092;
- ASSERT_TRUE(arm::Address::CanHoldStoreOffsetThumb(type, offset));
-
- __ StoreToOffset(type, arm::R0, arm::SP, offset);
- __ StoreToOffset(type, arm::IP, arm::SP, offset);
- __ StoreToOffset(type, arm::IP, arm::R5, offset);
-
- const char* expected =
- "str r0, [sp, #4092]\n"
- "str ip, [sp, #4092]\n"
- "str ip, [r5, #4092]\n";
- DriverStr(expected, "StoreWordToThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, StoreWordToNonThumbOffset) {
- arm::StoreOperandType type = arm::kStoreWord;
- int32_t offset = 4096;
- ASSERT_FALSE(arm::Address::CanHoldStoreOffsetThumb(type, offset));
-
- __ StoreToOffset(type, arm::R0, arm::SP, offset);
- __ StoreToOffset(type, arm::IP, arm::SP, offset);
- __ StoreToOffset(type, arm::IP, arm::R5, offset);
-
- const char* expected =
- "add.w ip, sp, #4096\n" // AddConstant(ip, sp, 4096)
- "str r0, [ip, #0]\n"
-
- "str r5, [sp, #-4]!\n" // Push(r5)
- "add.w r5, sp, #4096\n" // AddConstant(r5, 4100 & ~0xfff)
- "str ip, [r5, #4]\n" // StoreToOffset(type, ip, r5, 4100 & 0xfff)
- "ldr r5, [sp], #4\n" // Pop(r5)
-
- "str r6, [sp, #-4]!\n" // Push(r6)
- "add.w r6, r5, #4096\n" // AddConstant(r6, r5, 4096 & ~0xfff)
- "str ip, [r6, #0]\n" // StoreToOffset(type, ip, r6, 4096 & 0xfff)
- "ldr r6, [sp], #4\n"; // Pop(r6)
- DriverStr(expected, "StoreWordToNonThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, StoreWordPairToThumbOffset) {
- arm::StoreOperandType type = arm::kStoreWordPair;
- int32_t offset = 1020;
- ASSERT_TRUE(arm::Address::CanHoldStoreOffsetThumb(type, offset));
-
- __ StoreToOffset(type, arm::R0, arm::SP, offset);
- // We cannot use IP (i.e. R12) as first source register, as it would
- // force us to use SP (i.e. R13) as second source register, which
- // would have an "unpredictable" effect according to the ARMv7
- // specification (the T1 encoding describes the result as
- // UNPREDICTABLE when one of the source registers is R13).
- //
- // So we use (R11, IP) (i.e. (R11, R12)) as source registers in the
- // following instructions.
- __ StoreToOffset(type, arm::R11, arm::SP, offset);
- __ StoreToOffset(type, arm::R11, arm::R5, offset);
-
- const char* expected =
- "strd r0, r1, [sp, #1020]\n"
- "strd r11, ip, [sp, #1020]\n"
- "strd r11, ip, [r5, #1020]\n";
- DriverStr(expected, "StoreWordPairToThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, StoreWordPairToNonThumbOffset) {
- arm::StoreOperandType type = arm::kStoreWordPair;
- int32_t offset = 1024;
- ASSERT_FALSE(arm::Address::CanHoldStoreOffsetThumb(type, offset));
-
- __ StoreToOffset(type, arm::R0, arm::SP, offset);
- // Same comment as in AssemblerThumb2Test.StoreWordPairToThumbOffset
- // regarding the use of (R11, IP) (i.e. (R11, R12)) as source
- // registers in the following instructions.
- __ StoreToOffset(type, arm::R11, arm::SP, offset);
- __ StoreToOffset(type, arm::R11, arm::R5, offset);
-
- const char* expected =
- "add.w ip, sp, #1024\n" // AddConstant(ip, sp, 1024)
- "strd r0, r1, [ip, #0]\n"
-
- "str r5, [sp, #-4]!\n" // Push(r5)
- "add.w r5, sp, #1024\n" // AddConstant(r5, sp, (1024 + kRegisterSize) & ~0x3fc)
- "strd r11, ip, [r5, #4]\n" // StoreToOffset(type, r11, sp, (1024 + kRegisterSize) & 0x3fc)
- "ldr r5, [sp], #4\n" // Pop(r5)
-
- "str r6, [sp, #-4]!\n" // Push(r6)
- "add.w r6, r5, #1024\n" // AddConstant(r6, r5, 1024 & ~0x3fc)
- "strd r11, ip, [r6, #0]\n" // StoreToOffset(type, r11, r6, 1024 & 0x3fc)
- "ldr r6, [sp], #4\n"; // Pop(r6)
- DriverStr(expected, "StoreWordPairToNonThumbOffset");
-}
-
-TEST_F(AssemblerThumb2Test, DistantBackBranch) {
- Label start, end;
- __ Bind(&start);
- constexpr size_t kLdrR0R0Count1 = 256;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ b(&end, arm::EQ);
- __ b(&start, arm::LT);
- constexpr size_t kLdrR0R0Count2 = 256;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&end);
-
- std::string expected =
- "0:\n" +
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "beq 1f\n"
- "blt 0b\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "1:\n";
- DriverStr(expected, "DistantBackBranch");
-}
-
-TEST_F(AssemblerThumb2Test, TwoCbzMaxOffset) {
- Label label0, label1, label2;
- __ cbz(arm::R0, &label1);
- constexpr size_t kLdrR0R0Count1 = 63;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label0);
- __ cbz(arm::R0, &label2);
- __ Bind(&label1);
- constexpr size_t kLdrR0R0Count2 = 64;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label2);
-
- std::string expected =
- "cbz r0, 1f\n" + // cbz r0, label1
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "0:\n"
- "cbz r0, 2f\n" // cbz r0, label2
- "1:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "2:\n";
- DriverStr(expected, "TwoCbzMaxOffset");
-
- EXPECT_EQ(static_cast<uint32_t>(label0.Position()) + 0u,
- __ GetAdjustedPosition(label0.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label1.Position()) + 0u,
- __ GetAdjustedPosition(label1.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label2.Position()) + 0u,
- __ GetAdjustedPosition(label2.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, TwoCbzBeyondMaxOffset) {
- Label label0, label1, label2;
- __ cbz(arm::R0, &label1);
- constexpr size_t kLdrR0R0Count1 = 63;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label0);
- __ cbz(arm::R0, &label2);
- __ Bind(&label1);
- constexpr size_t kLdrR0R0Count2 = 65;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label2);
-
- std::string expected =
- "cmp r0, #0\n" // cbz r0, label1
- "beq.n 1f\n" +
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "0:\n"
- "cmp r0, #0\n" // cbz r0, label2
- "beq.n 2f\n"
- "1:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "2:\n";
- DriverStr(expected, "TwoCbzBeyondMaxOffset");
-
- EXPECT_EQ(static_cast<uint32_t>(label0.Position()) + 2u,
- __ GetAdjustedPosition(label0.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label1.Position()) + 4u,
- __ GetAdjustedPosition(label1.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label2.Position()) + 4u,
- __ GetAdjustedPosition(label2.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, TwoCbzSecondAtMaxB16Offset) {
- Label label0, label1, label2;
- __ cbz(arm::R0, &label1);
- constexpr size_t kLdrR0R0Count1 = 62;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label0);
- __ cbz(arm::R0, &label2);
- __ Bind(&label1);
- constexpr size_t kLdrR0R0Count2 = 128;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label2);
-
- std::string expected =
- "cbz r0, 1f\n" + // cbz r0, label1
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "0:\n"
- "cmp r0, #0\n" // cbz r0, label2
- "beq.n 2f\n"
- "1:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "2:\n";
- DriverStr(expected, "TwoCbzSecondAtMaxB16Offset");
-
- EXPECT_EQ(static_cast<uint32_t>(label0.Position()) + 0u,
- __ GetAdjustedPosition(label0.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label1.Position()) + 2u,
- __ GetAdjustedPosition(label1.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label2.Position()) + 2u,
- __ GetAdjustedPosition(label2.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, TwoCbzSecondBeyondMaxB16Offset) {
- Label label0, label1, label2;
- __ cbz(arm::R0, &label1);
- constexpr size_t kLdrR0R0Count1 = 62;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label0);
- __ cbz(arm::R0, &label2);
- __ Bind(&label1);
- constexpr size_t kLdrR0R0Count2 = 129;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label2);
-
- std::string expected =
- "cmp r0, #0\n" // cbz r0, label1
- "beq.n 1f\n" +
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "0:\n"
- "cmp r0, #0\n" // cbz r0, label2
- "beq.w 2f\n"
- "1:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "2:\n";
- DriverStr(expected, "TwoCbzSecondBeyondMaxB16Offset");
-
- EXPECT_EQ(static_cast<uint32_t>(label0.Position()) + 2u,
- __ GetAdjustedPosition(label0.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label1.Position()) + 6u,
- __ GetAdjustedPosition(label1.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label2.Position()) + 6u,
- __ GetAdjustedPosition(label2.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, TwoCbzFirstAtMaxB16Offset) {
- Label label0, label1, label2;
- __ cbz(arm::R0, &label1);
- constexpr size_t kLdrR0R0Count1 = 127;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label0);
- __ cbz(arm::R0, &label2);
- __ Bind(&label1);
- constexpr size_t kLdrR0R0Count2 = 64;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label2);
-
- std::string expected =
- "cmp r0, #0\n" // cbz r0, label1
- "beq.n 1f\n" +
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "0:\n"
- "cbz r0, 2f\n" // cbz r0, label2
- "1:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "2:\n";
- DriverStr(expected, "TwoCbzFirstAtMaxB16Offset");
-
- EXPECT_EQ(static_cast<uint32_t>(label0.Position()) + 2u,
- __ GetAdjustedPosition(label0.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label1.Position()) + 2u,
- __ GetAdjustedPosition(label1.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label2.Position()) + 2u,
- __ GetAdjustedPosition(label2.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, TwoCbzFirstBeyondMaxB16Offset) {
- Label label0, label1, label2;
- __ cbz(arm::R0, &label1);
- constexpr size_t kLdrR0R0Count1 = 127;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label0);
- __ cbz(arm::R0, &label2);
- __ Bind(&label1);
- constexpr size_t kLdrR0R0Count2 = 65;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label2);
-
- std::string expected =
- "cmp r0, #0\n" // cbz r0, label1
- "beq.w 1f\n" +
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "0:\n"
- "cmp r0, #0\n" // cbz r0, label2
- "beq.n 2f\n"
- "1:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "2:\n";
- DriverStr(expected, "TwoCbzFirstBeyondMaxB16Offset");
-
- EXPECT_EQ(static_cast<uint32_t>(label0.Position()) + 4u,
- __ GetAdjustedPosition(label0.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label1.Position()) + 6u,
- __ GetAdjustedPosition(label1.Position()));
- EXPECT_EQ(static_cast<uint32_t>(label2.Position()) + 6u,
- __ GetAdjustedPosition(label2.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralMax1KiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R0, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = 511;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "1:\n"
- "ldr.n r0, [pc, #((2f - 1b - 2) & ~2)]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralMax1KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 0u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1KiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R0, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = 512;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "1:\n"
- "ldr.w r0, [pc, #((2f - 1b - 2) & ~2)]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralBeyondMax1KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 2u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralMax4KiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R1, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = 2046;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "1:\n"
- "ldr.w r1, [pc, #((2f - 1b - 2) & ~2)]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralMax4KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 2u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax4KiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R1, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = 2047;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "movw r1, #4096\n" // "as" does not consider (2f - 1f - 4) a constant expression for movw.
- "1:\n"
- "add r1, pc\n"
- "ldr r1, [r1, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralBeyondMax4KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralMax64KiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R1, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1u << 15) - 2u;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "movw r1, #0xfffc\n" // "as" does not consider (2f - 1f - 4) a constant expression for movw.
- "1:\n"
- "add r1, pc\n"
- "ldr r1, [r1, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralMax64KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax64KiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R1, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1u << 15) - 1u;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "mov.w r1, #((2f - 1f - 4) & ~0xfff)\n"
- "1:\n"
- "add r1, pc\n"
- "ldr r1, [r1, #((2f - 1b - 4) & 0xfff)]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralBeyondMax64KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 8u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralMax1MiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R1, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1u << 19) - 3u;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "mov.w r1, #((2f - 1f - 4) & ~0xfff)\n"
- "1:\n"
- "add r1, pc\n"
- "ldr r1, [r1, #((2f - 1b - 4) & 0xfff)]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralMax1MiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 8u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1MiB) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R1, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1u << 19) - 2u;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- // "as" does not consider ((2f - 1f - 4) & 0xffff) a constant expression for movw.
- "movw r1, #(0x100000 & 0xffff)\n"
- // "as" does not consider ((2f - 1f - 4) >> 16) a constant expression for movt.
- "movt r1, #(0x100000 >> 16)\n"
- "1:\n"
- "add r1, pc\n"
- "ldr.w r1, [r1, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralBeyondMax1MiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 12u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralFar) {
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R1, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1u << 19) - 2u + 0x1234;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- // "as" does not consider ((2f - 1f - 4) & 0xffff) a constant expression for movw.
- "movw r1, #((0x100000 + 2 * 0x1234) & 0xffff)\n"
- // "as" does not consider ((2f - 1f - 4) >> 16) a constant expression for movt.
- "movt r1, #((0x100000 + 2 * 0x1234) >> 16)\n"
- "1:\n"
- "add r1, pc\n"
- "ldr.w r1, [r1, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralFar");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 12u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralWideMax1KiB) {
- arm::Literal* literal = __ NewLiteral<int64_t>(INT64_C(0x1234567887654321));
- __ LoadLiteral(arm::R1, arm::R3, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = 510;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "1:\n"
- "ldrd r1, r3, [pc, #((2f - 1b - 2) & ~2)]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x87654321\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralWideMax1KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 0u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralWideBeyondMax1KiB) {
- arm::Literal* literal = __ NewLiteral<int64_t>(INT64_C(0x1234567887654321));
- __ LoadLiteral(arm::R1, arm::R3, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = 511;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- // "as" does not consider ((2f - 1f - 4) & 0xffff) a constant expression for movw.
- "movw ip, #(0x408 - 0x4 - 4)\n"
- "1:\n"
- "add ip, pc\n"
- "ldrd r1, r3, [ip, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x87654321\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralWideBeyondMax1KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralSingleMax64KiB) {
- // The literal size must match but the type doesn't, so use an int32_t rather than float.
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::S3, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1 << 15) - 3u;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- // "as" does not consider ((2f - 1f - 4) & 0xffff) a constant expression for movw.
- "movw ip, #(0x10004 - 0x4 - 4)\n"
- "1:\n"
- "add ip, pc\n"
- "vldr s3, [ip, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralSingleMax64KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralSingleMax64KiB_UnalignedPC) {
- // The literal size must match but the type doesn't, so use an int32_t rather than float.
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ ldr(arm::R0, arm::Address(arm::R0));
- __ LoadLiteral(arm::S3, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1 << 15) - 4u;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- "ldr r0, [r0]\n"
- // "as" does not consider ((2f - 1f - 4) & 0xffff) a constant expression for movw.
- "movw ip, #(0x10004 - 0x6 - 4)\n"
- "1:\n"
- "add ip, pc\n"
- "vldr s3, [ip, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralSingleMax64KiB_UnalignedPC");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralDoubleBeyondMax64KiB) {
- // The literal size must match but the type doesn't, so use an int64_t rather than double.
- arm::Literal* literal = __ NewLiteral<int64_t>(INT64_C(0x1234567887654321));
- __ LoadLiteral(arm::D3, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1 << 15) - 2u;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- // "as" does not consider ((2f - 1f - 4) & 0xffff) a constant expression for movw.
- "movw ip, #((0x1000c - 0x8 - 4) & 0xffff)\n"
- // "as" does not consider ((2f - 1f - 4) >> 16) a constant expression for movt.
- "movt ip, #((0x1000c - 0x8 - 4) >> 16)\n"
- "1:\n"
- "add ip, pc\n"
- "vldr d3, [ip, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x87654321\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralDoubleBeyondMax64KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 10u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralDoubleFar) {
- // The literal size must match but the type doesn't, so use an int64_t rather than double.
- arm::Literal* literal = __ NewLiteral<int64_t>(INT64_C(0x1234567887654321));
- __ LoadLiteral(arm::D3, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = (1 << 15) - 2u + 0x1234;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- // "as" does not consider ((2f - 1f - 4) & 0xffff) a constant expression for movw.
- "movw ip, #((0x1000c + 2 * 0x1234 - 0x8 - 4) & 0xffff)\n"
- // "as" does not consider ((2f - 1f - 4) >> 16) a constant expression for movt.
- "movt ip, #((0x1000c + 2 * 0x1234 - 0x8 - 4) >> 16)\n"
- "1:\n"
- "add ip, pc\n"
- "vldr d3, [ip, #0]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x87654321\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralDoubleFar");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 10u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1KiBDueToAlignmentOnSecondPass) {
- // First part: as TwoCbzBeyondMaxOffset but add one 16-bit instruction to the end,
- // so that the size is not Aligned<4>(.). On the first pass, the assembler resizes
- // the second CBZ because it's out of range, then it will resize the first CBZ
- // which has been pushed out of range. Thus, after the first pass, the code size
- // will appear Aligned<4>(.) but the final size will not be.
- Label label0, label1, label2;
- __ cbz(arm::R0, &label1);
- constexpr size_t kLdrR0R0Count1 = 63;
- for (size_t i = 0; i != kLdrR0R0Count1; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label0);
- __ cbz(arm::R0, &label2);
- __ Bind(&label1);
- constexpr size_t kLdrR0R0Count2 = 65;
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ Bind(&label2);
- __ ldr(arm::R0, arm::Address(arm::R0));
-
- std::string expected_part1 =
- "cmp r0, #0\n" // cbz r0, label1
- "beq.n 1f\n" +
- RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") +
- "0:\n"
- "cmp r0, #0\n" // cbz r0, label2
- "beq.n 2f\n"
- "1:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- "2:\n" // Here the offset is Aligned<4>(.).
- "ldr r0, [r0]\n"; // Make the first part
-
- // Second part: as LoadLiteralMax1KiB with the caveat that the offset of the load
- // literal will not be Aligned<4>(.) but it will appear to be when we process the
- // instruction during the first pass, so the literal will need padding, which
- // pushes it out of range, and we shall end up with "ldr.w".
- arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678);
- __ LoadLiteral(arm::R0, literal);
- Label label;
- __ Bind(&label);
- constexpr size_t kLdrR0R0Count = 511;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- std::string expected =
- expected_part1 +
- "1:\n"
- "ldr.w r0, [pc, #((2f - 1b - 2) & ~2)]\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2, 0\n"
- "2:\n"
- ".word 0x12345678\n";
- DriverStr(expected, "LoadLiteralMax1KiB");
-
- EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u,
- __ GetAdjustedPosition(label.Position()));
-}
-
-TEST_F(AssemblerThumb2Test, BindTrackedLabel) {
- Label non_tracked, tracked, branch_target;
-
- // A few dummy loads on entry.
- constexpr size_t kLdrR0R0Count = 5;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- // A branch that will need to be fixed up.
- __ cbz(arm::R0, &branch_target);
-
- // Some more dummy loads.
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- // Now insert tracked and untracked label.
- __ Bind(&non_tracked);
- __ BindTrackedLabel(&tracked);
-
- // A lot of dummy loads, to ensure the branch needs resizing.
- constexpr size_t kLdrR0R0CountLong = 60;
- for (size_t i = 0; i != kLdrR0R0CountLong; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- // Bind the branch target.
- __ Bind(&branch_target);
-
- // One more load.
- __ ldr(arm::R0, arm::Address(arm::R0));
-
- std::string expected =
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- "cmp r0, #0\n" // cbz r0, 1f
- "beq.n 1f\n" +
- RepeatInsn(kLdrR0R0Count + kLdrR0R0CountLong, "ldr r0, [r0]\n") +
- "1:\n"
- "ldr r0, [r0]\n";
- DriverStr(expected, "BindTrackedLabel");
-
- // Expectation is that the tracked label should have moved.
- EXPECT_LT(non_tracked.Position(), tracked.Position());
-}
-
-TEST_F(AssemblerThumb2Test, JumpTable) {
- // The jump table. Use three labels.
- Label label1, label2, label3;
- std::vector<Label*> labels({ &label1, &label2, &label3 });
-
- // A few dummy loads on entry, interspersed with 2 labels.
- constexpr size_t kLdrR0R0Count = 5;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label1);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label2);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- // Create the jump table, emit the base load.
- arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1);
-
- // Dummy computation, stand-in for the address. We're only testing the jump table here, not how
- // it's being used.
- __ ldr(arm::R0, arm::Address(arm::R0));
-
- // Emit the jump
- __ EmitJumpTableDispatch(jump_table, arm::R1);
-
- // Some more dummy instructions.
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label3);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) { // Note: the count is odd so that no alignment
- __ ldr(arm::R0, arm::Address(arm::R0)); // padding is needed; gcc's "as" pads with nops,
- } // whereas we emit 0, which is not a nop.
-
- static_assert((kLdrR0R0Count + 3) * 2 < 1 * KB, "Too much offset");
-
- std::string expected =
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L1:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L2:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- "adr r1, .Ljump_table\n"
- "ldr r0, [r0]\n"
- ".Lbase:\n"
- "add pc, r1\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L3:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".align 2\n"
- ".Ljump_table:\n"
- ".4byte (.L1 - .Lbase - 4)\n"
- ".4byte (.L2 - .Lbase - 4)\n"
- ".4byte (.L3 - .Lbase - 4)\n";
- DriverStr(expected, "JumpTable");
-}
-
-// Test for >1K fixup.
-TEST_F(AssemblerThumb2Test, JumpTable4K) {
- // The jump table. Use three labels.
- Label label1, label2, label3;
- std::vector<Label*> labels({ &label1, &label2, &label3 });
-
- // A few dummy loads on entry, interspersed with 2 labels.
- constexpr size_t kLdrR0R0Count = 5;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label1);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label2);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- // Create the jump table, emit the base load.
- arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1);
-
- // Dummy computation, stand-in for the address. We're only testing the jump table here, not how
- // it's being used.
- __ ldr(arm::R0, arm::Address(arm::R0));
-
- // Emit the jump
- __ EmitJumpTableDispatch(jump_table, arm::R1);
-
- // Some more dummy instructions.
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label3);
- constexpr size_t kLdrR0R0Count2 = 600; // Note: the count is even so that no alignment
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) { // padding is needed; gcc's "as" pads with nops,
- __ ldr(arm::R0, arm::Address(arm::R0)); // whereas we emit 0, which is not a nop.
- }
-
- static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 > 1 * KB, "Not enough offset");
- static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 < 4 * KB, "Too much offset");
-
- std::string expected =
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L1:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L2:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- "adr r1, .Ljump_table\n"
- "ldr r0, [r0]\n"
- ".Lbase:\n"
- "add pc, r1\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L3:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- ".align 2\n"
- ".Ljump_table:\n"
- ".4byte (.L1 - .Lbase - 4)\n"
- ".4byte (.L2 - .Lbase - 4)\n"
- ".4byte (.L3 - .Lbase - 4)\n";
- DriverStr(expected, "JumpTable4K");
-}
-
-// Test for >4K fixup.
-TEST_F(AssemblerThumb2Test, JumpTable64K) {
- // The jump table. Use three labels.
- Label label1, label2, label3;
- std::vector<Label*> labels({ &label1, &label2, &label3 });
-
- // A few dummy loads on entry, interspersed with 2 labels.
- constexpr size_t kLdrR0R0Count = 5;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label1);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label2);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- // Create the jump table, emit the base load.
- arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1);
-
- // Dummy computation, stand-in for the address. We're only testing the jump table here, not how
- // it's being used.
- __ ldr(arm::R0, arm::Address(arm::R0));
-
- // Emit the jump
- __ EmitJumpTableDispatch(jump_table, arm::R1);
-
- // Some more dummy instructions.
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label3);
- constexpr size_t kLdrR0R0Count2 = 2601; // Note: the count is odd so that no alignment
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) { // padding is needed; gcc's "as" pads with nops,
- __ ldr(arm::R0, arm::Address(arm::R0)); // whereas we emit 0, which is not a nop.
- }
-
- static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 > 4 * KB, "Not enough offset");
- static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 < 64 * KB, "Too much offset");
-
- std::string expected =
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L1:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L2:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- // ~ adr r1, .Ljump_table; gcc's "as" can't seem to fix up a large offset by itself.
- // (Note: we have to use constants here, as labels aren't accepted.)
- "movw r1, #(((3 + " + StringPrintf("%zu", kLdrR0R0Count + kLdrR0R0Count2) +
- ") * 2 - 4) & 0xFFFF)\n"
- "add r1, pc\n"
- "ldr r0, [r0]\n"
- ".Lbase:\n"
- "add pc, r1\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L3:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- ".align 2\n"
- ".Ljump_table:\n"
- ".4byte (.L1 - .Lbase - 4)\n"
- ".4byte (.L2 - .Lbase - 4)\n"
- ".4byte (.L3 - .Lbase - 4)\n";
- DriverStr(expected, "JumpTable64K");
-}
-
-// Test for >64K fixup.
-TEST_F(AssemblerThumb2Test, JumpTableFar) {
- // The jump table. Use three labels.
- Label label1, label2, label3;
- std::vector<Label*> labels({ &label1, &label2, &label3 });
-
- // A few dummy loads on entry, interspersed with 2 labels.
- constexpr size_t kLdrR0R0Count = 5;
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label1);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label2);
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-
- // Create the jump table, emit the base load.
- arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1);
-
- // Dummy computation, stand-in for the address. We're only testing the jump table here, not how
- // it's being used.
- __ ldr(arm::R0, arm::Address(arm::R0));
-
- // Emit the jump
- __ EmitJumpTableDispatch(jump_table, arm::R1);
-
- // Some more dummy instructions.
- for (size_t i = 0; i != kLdrR0R0Count; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
- __ BindTrackedLabel(&label3);
- constexpr size_t kLdrR0R0Count2 = 70001; // Note: the count is odd so that no alignment
- for (size_t i = 0; i != kLdrR0R0Count2; ++i) { // padding is needed; gcc's "as" pads with nops,
- __ ldr(arm::R0, arm::Address(arm::R0)); // whereas we emit 0, which is not a nop.
- }
-
- static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 > 64 * KB, "Not enough offset");
-
- std::string expected =
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L1:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L2:\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- // ~ adr r1, .Ljump_table; gcc's "as" can't seem to fix up a large offset by itself.
- // (Note: we have to use constants here, as labels aren't accepted.)
- "movw r1, #(((3 + " + StringPrintf("%zu", kLdrR0R0Count + kLdrR0R0Count2) +
- ") * 2 - 4) & 0xFFFF)\n"
- "movt r1, #(((3 + " + StringPrintf("%zu", kLdrR0R0Count + kLdrR0R0Count2) +
- ") * 2 - 4) >> 16)\n"
- ".Lhelp:"
- "add r1, pc\n"
- "ldr r0, [r0]\n"
- ".Lbase:\n"
- "add pc, r1\n" +
- RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") +
- ".L3:\n" +
- RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") +
- ".align 2\n"
- ".Ljump_table:\n"
- ".4byte (.L1 - .Lbase - 4)\n"
- ".4byte (.L2 - .Lbase - 4)\n"
- ".4byte (.L3 - .Lbase - 4)\n";
- DriverStr(expected, "JumpTableFar");
-}
-
-TEST_F(AssemblerThumb2Test, Clz) {
- __ clz(arm::R0, arm::R1);
-
- const char* expected = "clz r0, r1\n";
-
- DriverStr(expected, "clz");
-}
-
-TEST_F(AssemblerThumb2Test, rbit) {
- __ rbit(arm::R1, arm::R0);
-
- const char* expected = "rbit r1, r0\n";
-
- DriverStr(expected, "rbit");
-}
-
-TEST_F(AssemblerThumb2Test, rev) {
- __ rev(arm::R1, arm::R0);
-
- const char* expected = "rev r1, r0\n";
-
- DriverStr(expected, "rev");
-}
-
-TEST_F(AssemblerThumb2Test, rev16) {
- __ rev16(arm::R1, arm::R0);
-
- const char* expected = "rev16 r1, r0\n";
-
- DriverStr(expected, "rev16");
-}
-
-TEST_F(AssemblerThumb2Test, revsh) {
- __ revsh(arm::R1, arm::R0);
-
- const char* expected = "revsh r1, r0\n";
-
- DriverStr(expected, "revsh");
-}
-
-TEST_F(AssemblerThumb2Test, vcnt) {
- // Different D register numbers are used here, to test register encoding.
- // The source register number is encoded as M:Vm and the destination register number as D:Vd.
- // For source and destination registers in D0..D15, the M bit and D bit should be 0.
- // For source and destination registers in D16..D31, the M bit and D bit should be 1.
- __ vcntd(arm::D0, arm::D1);
- __ vcntd(arm::D19, arm::D20);
- __ vcntd(arm::D0, arm::D9);
- __ vcntd(arm::D16, arm::D20);
-
- std::string expected =
- "vcnt.8 d0, d1\n"
- "vcnt.8 d19, d20\n"
- "vcnt.8 d0, d9\n"
- "vcnt.8 d16, d20\n";
-
- DriverStr(expected, "vcnt");
-}
-
-TEST_F(AssemblerThumb2Test, vpaddl) {
- // Different D register numbers are used here, to test register encoding.
- // The source register number is encoded as M:Vm and the destination register number as D:Vd.
- // For source and destination registers in D0..D15, the M bit and D bit should be 0.
- // For source and destination registers in D16..D31, the M bit and D bit should be 1.
- // Different data types (signed and unsigned) are also tested.
- __ vpaddld(arm::D0, arm::D0, 8, true);
- __ vpaddld(arm::D20, arm::D20, 8, false);
- __ vpaddld(arm::D0, arm::D20, 16, false);
- __ vpaddld(arm::D20, arm::D0, 32, true);
-
- std::string expected =
- "vpaddl.u8 d0, d0\n"
- "vpaddl.s8 d20, d20\n"
- "vpaddl.s16 d0, d20\n"
- "vpaddl.u32 d20, d0\n";
-
- DriverStr(expected, "vpaddl");
-}
-
-TEST_F(AssemblerThumb2Test, LoadFromShiftedRegOffset) {
- arm::Address mem_address(arm::R0, arm::R1, arm::Shift::LSL, 2);
-
- __ ldrsb(arm::R2, mem_address);
- __ ldrb(arm::R2, mem_address);
- __ ldrsh(arm::R2, mem_address);
- __ ldrh(arm::R2, mem_address);
- __ ldr(arm::R2, mem_address);
-
- std::string expected =
- "ldrsb r2, [r0, r1, LSL #2]\n"
- "ldrb r2, [r0, r1, LSL #2]\n"
- "ldrsh r2, [r0, r1, LSL #2]\n"
- "ldrh r2, [r0, r1, LSL #2]\n"
- "ldr r2, [r0, r1, LSL #2]\n";
-
- DriverStr(expected, "LoadFromShiftedRegOffset");
-}
-
-TEST_F(AssemblerThumb2Test, VStmLdmPushPop) {
- // Different D register numbers are used here, to test register encoding.
- // The source register number is encoded as M:Vm and the destination register number as D:Vd.
- // For source and destination registers in D0..D15, the M bit and D bit should be 0.
- // For source and destination registers in D16..D31, the M bit and D bit should be 1.
- // Different register list lengths are also tested.
- __ vstmiad(arm::R0, arm::D0, 4);
- __ vldmiad(arm::R1, arm::D9, 5);
- __ vpopd(arm::D0, 4);
- __ vpushd(arm::D9, 5);
- __ vpops(arm::S0, 4);
- __ vpushs(arm::S9, 5);
- __ vpushs(arm::S16, 5);
- __ vpushd(arm::D0, 16);
- __ vpushd(arm::D1, 15);
- __ vpushd(arm::D8, 16);
- __ vpushd(arm::D31, 1);
- __ vpushs(arm::S0, 32);
- __ vpushs(arm::S1, 31);
- __ vpushs(arm::S16, 16);
- __ vpushs(arm::S31, 1);
-
- std::string expected =
- "vstmia r0, {d0 - d3}\n"
- "vldmia r1, {d9 - d13}\n"
- "vpop {d0 - d3}\n"
- "vpush {d9 - d13}\n"
- "vpop {s0 - s3}\n"
- "vpush {s9 - s13}\n"
- "vpush {s16 - s20}\n"
- "vpush {d0 - d15}\n"
- "vpush {d1 - d15}\n"
- "vpush {d8 - d23}\n"
- "vpush {d31}\n"
- "vpush {s0 - s31}\n"
- "vpush {s1 - s31}\n"
- "vpush {s16 - s31}\n"
- "vpush {s31}\n";
-
- DriverStr(expected, "VStmLdmPushPop");
-}
-
-} // namespace art
diff --git a/compiler/utils/arm/constants_arm.cc b/compiler/utils/arm/constants_arm.cc
new file mode 100644
index 0000000..b02b343
--- /dev/null
+++ b/compiler/utils/arm/constants_arm.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constants_arm.h"
+
+namespace art {
+namespace arm {
+
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+ if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+ os << "d" << static_cast<int>(rhs);
+ } else {
+ os << "DRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 2060064..5b87e3e 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -97,37 +97,6 @@
};
std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-
-// Values for the condition field as defined in Table A8-1 "Condition
-// codes" (refer to Section A8.3 "Conditional execution").
-enum Condition { // private marker to avoid generate-operator-out.py from processing.
- kNoCondition = -1,
- // Meaning (integer) | Meaning (floating-point)
- // ---------------------------------------+-----------------------------------------
- EQ = 0, // Equal | Equal
- NE = 1, // Not equal | Not equal, or unordered
- CS = 2, // Carry set | Greater than, equal, or unordered
- CC = 3, // Carry clear | Less than
- MI = 4, // Minus, negative | Less than
- PL = 5, // Plus, positive or zero | Greater than, equal, or unordered
- VS = 6, // Overflow | Unordered (i.e. at least one NaN operand)
- VC = 7, // No overflow | Not unordered
- HI = 8, // Unsigned higher | Greater than, or unordered
- LS = 9, // Unsigned lower or same | Less than or equal
- GE = 10, // Signed greater than or equal | Greater than or equal
- LT = 11, // Signed less than | Less than, or unordered
- GT = 12, // Signed greater than | Greater than
- LE = 13, // Signed less than or equal | Less than, equal, or unordered
- AL = 14, // Always (unconditional) | Always (unconditional)
- kSpecialCondition = 15, // Special condition (refer to Section A8.3 "Conditional execution").
- kMaxCondition = 16,
-
- HS = CS, // HS (unsigned higher or same) is a synonym for CS.
- LO = CC // LO (unsigned lower) is a synonym for CC.
-};
-std::ostream& operator<<(std::ostream& os, const Condition& rhs);
-
-
// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
// as defined in section A3.4
enum Opcode {
@@ -151,70 +120,6 @@
ORN = 16, // Logical OR NOT.
kMaxOperand = 17
};
-std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
-
-// Shifter types for Data-processing operands as defined in section A5.1.2.
-enum Shift {
- kNoShift = -1,
- LSL = 0, // Logical shift left
- LSR = 1, // Logical shift right
- ASR = 2, // Arithmetic shift right
- ROR = 3, // Rotate right
- RRX = 4, // Rotate right with extend.
- kMaxShift
-};
-std::ostream& operator<<(std::ostream& os, const Shift& rhs);
-
-// Constants used for the decoding or encoding of the individual fields of
-// instructions. Based on the "Figure 3-1 ARM instruction set summary".
-enum InstructionFields { // private marker to avoid generate-operator-out.py from processing.
- kConditionShift = 28,
- kConditionBits = 4,
- kTypeShift = 25,
- kTypeBits = 3,
- kLinkShift = 24,
- kLinkBits = 1,
- kUShift = 23,
- kUBits = 1,
- kOpcodeShift = 21,
- kOpcodeBits = 4,
- kSShift = 20,
- kSBits = 1,
- kRnShift = 16,
- kRnBits = 4,
- kRdShift = 12,
- kRdBits = 4,
- kRsShift = 8,
- kRsBits = 4,
- kRmShift = 0,
- kRmBits = 4,
-
- // Immediate instruction fields encoding.
- kRotateShift = 8,
- kRotateBits = 4,
- kImmed8Shift = 0,
- kImmed8Bits = 8,
-
- // Shift instruction register fields encodings.
- kShiftImmShift = 7,
- kShiftRegisterShift = 8,
- kShiftImmBits = 5,
- kShiftShift = 5,
- kShiftBits = 2,
-
- // Load/store instruction offset field encoding.
- kOffset12Shift = 0,
- kOffset12Bits = 12,
- kOffset12Mask = 0x00000fff,
-
- // Mul instruction register fields encodings.
- kMulRdShift = 16,
- kMulRdBits = 4,
- kMulRnShift = 12,
- kMulRnBits = 4,
-
- kBranchOffsetMask = 0x00ffffff
-};
// Size (in bytes) of registers.
const int kRegisterSize = 4;
@@ -222,231 +127,6 @@
// List of registers used in load/store multiple.
typedef uint16_t RegList;
-// The class Instr enables access to individual fields defined in the ARM
-// architecture instruction set encoding as described in figure A3-1.
-//
-// Example: Test whether the instruction at ptr does set the condition code
-// bits.
-//
-// bool InstructionSetsConditionCodes(uint8_t* ptr) {
-// Instr* instr = Instr::At(ptr);
-// int type = instr->TypeField();
-// return ((type == 0) || (type == 1)) && instr->HasS();
-// }
-//
-class Instr {
- public:
- enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
- kPCReadOffset = 8
- };
-
- bool IsBreakPoint() {
- return IsBkpt();
- }
-
- // Get the raw instruction bits.
- int32_t InstructionBits() const {
- return *reinterpret_cast<const int32_t*>(this);
- }
-
- // Set the raw instruction bits to value.
- void SetInstructionBits(int32_t value) {
- *reinterpret_cast<int32_t*>(this) = value;
- }
-
- // Read one particular bit out of the instruction bits.
- int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
-
- // Read a bit field out of the instruction bits.
- int Bits(int shift, int count) const {
- return (InstructionBits() >> shift) & ((1 << count) - 1);
- }
-
-
- // Accessors for the different named fields used in the ARM encoding.
- // The naming of these accessor corresponds to figure A3-1.
- // Generally applicable fields
- Condition ConditionField() const {
- return static_cast<Condition>(Bits(kConditionShift, kConditionBits));
- }
- int TypeField() const { return Bits(kTypeShift, kTypeBits); }
-
- Register RnField() const { return static_cast<Register>(
- Bits(kRnShift, kRnBits)); }
- Register RdField() const { return static_cast<Register>(
- Bits(kRdShift, kRdBits)); }
-
- // Fields used in Data processing instructions
- Opcode OpcodeField() const {
- return static_cast<Opcode>(Bits(kOpcodeShift, kOpcodeBits));
- }
- int SField() const { return Bits(kSShift, kSBits); }
- // with register
- Register RmField() const {
- return static_cast<Register>(Bits(kRmShift, kRmBits));
- }
- Shift ShiftField() const { return static_cast<Shift>(
- Bits(kShiftShift, kShiftBits)); }
- int RegShiftField() const { return Bit(4); }
- Register RsField() const {
- return static_cast<Register>(Bits(kRsShift, kRsBits));
- }
- int ShiftAmountField() const { return Bits(kShiftImmShift,
- kShiftImmBits); }
- // with immediate
- int RotateField() const { return Bits(kRotateShift, kRotateBits); }
- int Immed8Field() const { return Bits(kImmed8Shift, kImmed8Bits); }
-
- // Fields used in Load/Store instructions
- int PUField() const { return Bits(23, 2); }
- int BField() const { return Bit(22); }
- int WField() const { return Bit(21); }
- int LField() const { return Bit(20); }
- // with register uses same fields as Data processing instructions above
- // with immediate
- int Offset12Field() const { return Bits(kOffset12Shift,
- kOffset12Bits); }
- // multiple
- int RlistField() const { return Bits(0, 16); }
- // extra loads and stores
- int SignField() const { return Bit(6); }
- int HField() const { return Bit(5); }
- int ImmedHField() const { return Bits(8, 4); }
- int ImmedLField() const { return Bits(0, 4); }
-
- // Fields used in Branch instructions
- int LinkField() const { return Bits(kLinkShift, kLinkBits); }
- int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
-
- // Fields used in Supervisor Call instructions
- uint32_t SvcField() const { return Bits(0, 24); }
-
- // Field used in Breakpoint instruction
- uint16_t BkptField() const {
- return ((Bits(8, 12) << 4) | Bits(0, 4));
- }
-
- // Field used in 16-bit immediate move instructions
- uint16_t MovwField() const {
- return ((Bits(16, 4) << 12) | Bits(0, 12));
- }
-
- // Field used in VFP float immediate move instruction
- float ImmFloatField() const {
- uint32_t imm32 = (Bit(19) << 31) | (((1 << 5) - Bit(18)) << 25) |
- (Bits(16, 2) << 23) | (Bits(0, 4) << 19);
- return bit_cast<float, uint32_t>(imm32);
- }
-
- // Field used in VFP double immediate move instruction
- double ImmDoubleField() const {
- uint64_t imm64 = (Bit(19)*(1LL << 63)) | (((1LL << 8) - Bit(18)) << 54) |
- (Bits(16, 2)*(1LL << 52)) | (Bits(0, 4)*(1LL << 48));
- return bit_cast<double, uint64_t>(imm64);
- }
-
- // Test for data processing instructions of type 0 or 1.
- // See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition",
- // section A5.1 "ARM instruction set encoding".
- bool IsDataProcessing() const {
- CHECK_NE(ConditionField(), kSpecialCondition);
- CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
- return ((Bits(20, 5) & 0x19) != 0x10) &&
- ((Bit(25) == 1) || // Data processing immediate.
- (Bit(4) == 0) || // Data processing register.
- (Bit(7) == 0)); // Data processing register-shifted register.
- }
-
- // Tests for special encodings of type 0 instructions (extra loads and stores,
- // as well as multiplications, synchronization primitives, and miscellaneous).
- // Can only be called for a type 0 or 1 instruction.
- bool IsMiscellaneous() const {
- CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
- return ((Bit(25) == 0) && ((Bits(20, 5) & 0x19) == 0x10) && (Bit(7) == 0));
- }
- bool IsMultiplyOrSyncPrimitive() const {
- CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
- return ((Bit(25) == 0) && (Bits(4, 4) == 9));
- }
-
- // Test for Supervisor Call instruction.
- bool IsSvc() const {
- return ((InstructionBits() & 0xff000000) == 0xef000000);
- }
-
- // Test for Breakpoint instruction.
- bool IsBkpt() const {
- return ((InstructionBits() & 0xfff000f0) == 0xe1200070);
- }
-
- // VFP register fields.
- SRegister SnField() const {
- return static_cast<SRegister>((Bits(kRnShift, kRnBits) << 1) + Bit(7));
- }
- SRegister SdField() const {
- return static_cast<SRegister>((Bits(kRdShift, kRdBits) << 1) + Bit(22));
- }
- SRegister SmField() const {
- return static_cast<SRegister>((Bits(kRmShift, kRmBits) << 1) + Bit(5));
- }
- DRegister DnField() const {
- return static_cast<DRegister>(Bits(kRnShift, kRnBits) + (Bit(7) << 4));
- }
- DRegister DdField() const {
- return static_cast<DRegister>(Bits(kRdShift, kRdBits) + (Bit(22) << 4));
- }
- DRegister DmField() const {
- return static_cast<DRegister>(Bits(kRmShift, kRmBits) + (Bit(5) << 4));
- }
-
- // Test for VFP data processing or single transfer instructions of type 7.
- bool IsVFPDataProcessingOrSingleTransfer() const {
- CHECK_NE(ConditionField(), kSpecialCondition);
- CHECK_EQ(TypeField(), 7);
- return ((Bit(24) == 0) && (Bits(9, 3) == 5));
- // Bit(4) == 0: Data Processing
- // Bit(4) == 1: 8, 16, or 32-bit Transfer between ARM Core and VFP
- }
-
- // Test for VFP 64-bit transfer instructions of type 6.
- bool IsVFPDoubleTransfer() const {
- CHECK_NE(ConditionField(), kSpecialCondition);
- CHECK_EQ(TypeField(), 6);
- return ((Bits(21, 4) == 2) && (Bits(9, 3) == 5) &&
- ((Bits(4, 4) & 0xd) == 1));
- }
-
- // Test for VFP load and store instructions of type 6.
- bool IsVFPLoadStore() const {
- CHECK_NE(ConditionField(), kSpecialCondition);
- CHECK_EQ(TypeField(), 6);
- return ((Bits(20, 5) & 0x12) == 0x10) && (Bits(9, 3) == 5);
- }
-
- // Special accessors that test for existence of a value.
- bool HasS() const { return SField() == 1; }
- bool HasB() const { return BField() == 1; }
- bool HasW() const { return WField() == 1; }
- bool HasL() const { return LField() == 1; }
- bool HasSign() const { return SignField() == 1; }
- bool HasH() const { return HField() == 1; }
- bool HasLink() const { return LinkField() == 1; }
-
- // Instructions are read out of a code stream. The only way to get a
- // reference to an instruction is to convert a pointer. There is no way
- // to allocate or create instances of class Instr.
- // Use the At(pc) function to create references to Instr.
- static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
- Instr* Next() { return this + kInstrSize; }
-
- private:
- // We need to prevent the creation of instances of class Instr.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
-};
} // namespace arm
} // namespace art
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 57f3b15..25eca23 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -19,24 +19,6 @@
#include <algorithm>
#include <vector>
-#ifdef ART_ENABLE_CODEGEN_arm
-#include "arm/assembler_thumb2.h"
-#endif
-#ifdef ART_ENABLE_CODEGEN_arm64
-#include "arm64/assembler_arm64.h"
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "mips/assembler_mips.h"
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "mips64/assembler_mips64.h"
-#endif
-#ifdef ART_ENABLE_CODEGEN_x86
-#include "x86/assembler_x86.h"
-#endif
-#ifdef ART_ENABLE_CODEGEN_x86_64
-#include "x86_64/assembler_x86_64.h"
-#endif
#include "base/casts.h"
#include "globals.h"
#include "memory_region.h"
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 759ed38..741beab 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -22,7 +22,6 @@
#include <sys/types.h>
#include "gtest/gtest.h"
-#include "utils/arm/assembler_thumb2.h"
#include "jni/quick/calling_convention.h"
#include "utils/arm/jni_macro_assembler_arm_vixl.h"
@@ -176,1451 +175,18 @@
#endif // ART_TARGET_ANDROID
}
-#define __ assembler->
-
-void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname,
- const char* const* results) {
- __ FinalizeCode();
- size_t cs = __ CodeSize();
- std::vector<uint8_t> managed_code(cs);
- MemoryRegion code(&managed_code[0], managed_code.size());
- __ FinalizeInstructions(code);
-
- DumpAndCheck(managed_code, testname, results);
-}
-
-void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) {
- InitResults();
- std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
- ASSERT_NE(results, test_results.end());
-
- EmitAndCheck(assembler, testname, results->second);
-}
-
-#undef __
-
-class Thumb2AssemblerTest : public ::testing::Test {
- public:
- Thumb2AssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
-
- ArenaPool pool;
- ArenaAllocator arena;
- arm::Thumb2Assembler assembler;
-};
-
-#define __ assembler.
-
-TEST_F(Thumb2AssemblerTest, SimpleMov) {
- __ movs(R0, ShifterOperand(R1));
- __ mov(R0, ShifterOperand(R1));
- __ mov(R8, ShifterOperand(R9));
-
- __ mov(R0, ShifterOperand(1));
- __ mov(R8, ShifterOperand(9));
-
- EmitAndCheck(&assembler, "SimpleMov");
-}
-
-TEST_F(Thumb2AssemblerTest, SimpleMov32) {
- __ Force32Bit();
-
- __ mov(R0, ShifterOperand(R1));
- __ mov(R8, ShifterOperand(R9));
-
- EmitAndCheck(&assembler, "SimpleMov32");
-}
-
-TEST_F(Thumb2AssemblerTest, SimpleMovAdd) {
- __ mov(R0, ShifterOperand(R1));
- __ adds(R0, R1, ShifterOperand(R2));
- __ add(R0, R1, ShifterOperand(0));
-
- EmitAndCheck(&assembler, "SimpleMovAdd");
-}
-
-TEST_F(Thumb2AssemblerTest, DataProcessingRegister) {
- // 32 bit variants using low registers.
- __ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
- __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ sbc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ rsb(R0, R1, ShifterOperand(R2), AL, kCcKeep);
- __ teq(R0, ShifterOperand(R1));
-
- // 16 bit variants using low registers.
- __ movs(R0, ShifterOperand(R1));
- __ mov(R0, ShifterOperand(R1), AL, kCcKeep);
- __ mvns(R0, ShifterOperand(R1));
- __ add(R0, R0, ShifterOperand(R1), AL, kCcKeep);
- __ adds(R0, R1, ShifterOperand(R2));
- __ subs(R0, R1, ShifterOperand(R2));
- __ adcs(R0, R0, ShifterOperand(R1));
- __ sbcs(R0, R0, ShifterOperand(R1));
- __ ands(R0, R0, ShifterOperand(R1));
- __ orrs(R0, R0, ShifterOperand(R1));
- __ eors(R0, R0, ShifterOperand(R1));
- __ bics(R0, R0, ShifterOperand(R1));
- __ tst(R0, ShifterOperand(R1));
- __ cmp(R0, ShifterOperand(R1));
- __ cmn(R0, ShifterOperand(R1));
-
- // 16-bit variants using high registers.
- __ mov(R1, ShifterOperand(R8), AL, kCcKeep);
- __ mov(R9, ShifterOperand(R0), AL, kCcKeep);
- __ mov(R8, ShifterOperand(R9), AL, kCcKeep);
- __ add(R1, R1, ShifterOperand(R8), AL, kCcKeep);
- __ add(R9, R9, ShifterOperand(R0), AL, kCcKeep);
- __ add(R8, R8, ShifterOperand(R9), AL, kCcKeep);
- __ cmp(R0, ShifterOperand(R9));
- __ cmp(R8, ShifterOperand(R1));
- __ cmp(R9, ShifterOperand(R8));
-
- // The 16-bit RSBS Rd, Rn, #0, also known as NEGS Rd, Rn is specified using
- // an immediate (0) but emitted without any, so we test it here.
- __ rsbs(R0, R1, ShifterOperand(0));
- __ rsbs(R0, R0, ShifterOperand(0)); // Check Rd == Rn code path.
-
- // 32 bit variants using high registers that would be 16-bit if using low registers.
- __ movs(R0, ShifterOperand(R8));
- __ mvns(R0, ShifterOperand(R8));
- __ add(R0, R1, ShifterOperand(R8), AL, kCcKeep);
- __ adds(R0, R1, ShifterOperand(R8));
- __ subs(R0, R1, ShifterOperand(R8));
- __ adcs(R0, R0, ShifterOperand(R8));
- __ sbcs(R0, R0, ShifterOperand(R8));
- __ ands(R0, R0, ShifterOperand(R8));
- __ orrs(R0, R0, ShifterOperand(R8));
- __ eors(R0, R0, ShifterOperand(R8));
- __ bics(R0, R0, ShifterOperand(R8));
- __ tst(R0, ShifterOperand(R8));
- __ cmn(R0, ShifterOperand(R8));
- __ rsbs(R0, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit.
- __ rsbs(R8, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit (Rd == Rn).
-
- // 32-bit variants of instructions that would be 16-bit outside IT block.
- __ it(arm::EQ);
- __ mvns(R0, ShifterOperand(R1), arm::EQ);
- __ it(arm::EQ);
- __ adds(R0, R1, ShifterOperand(R2), arm::EQ);
- __ it(arm::EQ);
- __ subs(R0, R1, ShifterOperand(R2), arm::EQ);
- __ it(arm::EQ);
- __ adcs(R0, R0, ShifterOperand(R1), arm::EQ);
- __ it(arm::EQ);
- __ sbcs(R0, R0, ShifterOperand(R1), arm::EQ);
- __ it(arm::EQ);
- __ ands(R0, R0, ShifterOperand(R1), arm::EQ);
- __ it(arm::EQ);
- __ orrs(R0, R0, ShifterOperand(R1), arm::EQ);
- __ it(arm::EQ);
- __ eors(R0, R0, ShifterOperand(R1), arm::EQ);
- __ it(arm::EQ);
- __ bics(R0, R0, ShifterOperand(R1), arm::EQ);
-
- // 16-bit variants of instructions that would be 32-bit outside IT block.
- __ it(arm::EQ);
- __ mvn(R0, ShifterOperand(R1), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ add(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ sub(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ adc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ sbc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ and_(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ orr(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ eor(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
- __ it(arm::EQ);
- __ bic(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
-
- // 16 bit variants selected for the default kCcDontCare.
- __ mov(R0, ShifterOperand(R1));
- __ mvn(R0, ShifterOperand(R1));
- __ add(R0, R0, ShifterOperand(R1));
- __ add(R0, R1, ShifterOperand(R2));
- __ sub(R0, R1, ShifterOperand(R2));
- __ adc(R0, R0, ShifterOperand(R1));
- __ sbc(R0, R0, ShifterOperand(R1));
- __ and_(R0, R0, ShifterOperand(R1));
- __ orr(R0, R0, ShifterOperand(R1));
- __ eor(R0, R0, ShifterOperand(R1));
- __ bic(R0, R0, ShifterOperand(R1));
- __ mov(R1, ShifterOperand(R8));
- __ mov(R9, ShifterOperand(R0));
- __ mov(R8, ShifterOperand(R9));
- __ add(R1, R1, ShifterOperand(R8));
- __ add(R9, R9, ShifterOperand(R0));
- __ add(R8, R8, ShifterOperand(R9));
- __ rsb(R0, R1, ShifterOperand(0));
- __ rsb(R0, R0, ShifterOperand(0));
-
- // And an arbitrary 32-bit instruction using IP.
- __ add(R12, R1, ShifterOperand(R0), AL, kCcKeep);
-
- EmitAndCheck(&assembler, "DataProcessingRegister");
-}
-
-TEST_F(Thumb2AssemblerTest, DataProcessingImmediate) {
- __ mov(R0, ShifterOperand(0x55));
- __ mvn(R0, ShifterOperand(0x55));
- __ add(R0, R1, ShifterOperand(0x55));
- __ sub(R0, R1, ShifterOperand(0x55));
- __ and_(R0, R1, ShifterOperand(0x55));
- __ orr(R0, R1, ShifterOperand(0x55));
- __ orn(R0, R1, ShifterOperand(0x55));
- __ eor(R0, R1, ShifterOperand(0x55));
- __ bic(R0, R1, ShifterOperand(0x55));
- __ adc(R0, R1, ShifterOperand(0x55));
- __ sbc(R0, R1, ShifterOperand(0x55));
- __ rsb(R0, R1, ShifterOperand(0x55));
-
- __ tst(R0, ShifterOperand(0x55));
- __ teq(R0, ShifterOperand(0x55));
- __ cmp(R0, ShifterOperand(0x55));
- __ cmn(R0, ShifterOperand(0x55));
-
- __ add(R0, R1, ShifterOperand(5));
- __ sub(R0, R1, ShifterOperand(5));
-
- __ movs(R0, ShifterOperand(0x55));
- __ mvns(R0, ShifterOperand(0x55));
-
- __ adds(R0, R1, ShifterOperand(5));
- __ subs(R0, R1, ShifterOperand(5));
-
- EmitAndCheck(&assembler, "DataProcessingImmediate");
-}
-
-TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
- __ mov(R0, ShifterOperand(0x550055));
- __ mvn(R0, ShifterOperand(0x550055));
- __ add(R0, R1, ShifterOperand(0x550055));
- __ sub(R0, R1, ShifterOperand(0x550055));
- __ and_(R0, R1, ShifterOperand(0x550055));
- __ orr(R0, R1, ShifterOperand(0x550055));
- __ orn(R0, R1, ShifterOperand(0x550055));
- __ eor(R0, R1, ShifterOperand(0x550055));
- __ bic(R0, R1, ShifterOperand(0x550055));
- __ adc(R0, R1, ShifterOperand(0x550055));
- __ sbc(R0, R1, ShifterOperand(0x550055));
- __ rsb(R0, R1, ShifterOperand(0x550055));
-
- __ tst(R0, ShifterOperand(0x550055));
- __ teq(R0, ShifterOperand(0x550055));
- __ cmp(R0, ShifterOperand(0x550055));
- __ cmn(R0, ShifterOperand(0x550055));
-
- EmitAndCheck(&assembler, "DataProcessingModifiedImmediate");
-}
-
-
-TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
- __ mov(R0, ShifterOperand(0x550055));
- __ mov(R0, ShifterOperand(0x55005500));
- __ mov(R0, ShifterOperand(0x55555555));
- __ mov(R0, ShifterOperand(0xd5000000)); // rotated to first position
- __ mov(R0, ShifterOperand(0x6a000000)); // rotated to second position
- __ mov(R0, ShifterOperand(0x350)); // rotated to 2nd last position
- __ mov(R0, ShifterOperand(0x1a8)); // rotated to last position
-
- EmitAndCheck(&assembler, "DataProcessingModifiedImmediates");
-}
-
-TEST_F(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
- // 16-bit variants.
- __ movs(R3, ShifterOperand(R4, LSL, 4));
- __ movs(R3, ShifterOperand(R4, LSR, 5));
- __ movs(R3, ShifterOperand(R4, ASR, 6));
-
- // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
- __ movs(R3, ShifterOperand(R4, ROR, 7));
-
- // 32-bit RRX because RRX has no 16-bit version.
- __ movs(R3, ShifterOperand(R4, RRX));
-
- // 32 bit variants (not setting condition codes).
- __ mov(R3, ShifterOperand(R4, LSL, 4), AL, kCcKeep);
- __ mov(R3, ShifterOperand(R4, LSR, 5), AL, kCcKeep);
- __ mov(R3, ShifterOperand(R4, ASR, 6), AL, kCcKeep);
- __ mov(R3, ShifterOperand(R4, ROR, 7), AL, kCcKeep);
- __ mov(R3, ShifterOperand(R4, RRX), AL, kCcKeep);
-
- // 32 bit variants (high registers).
- __ movs(R8, ShifterOperand(R4, LSL, 4));
- __ movs(R8, ShifterOperand(R4, LSR, 5));
- __ movs(R8, ShifterOperand(R4, ASR, 6));
- __ movs(R8, ShifterOperand(R4, ROR, 7));
- __ movs(R8, ShifterOperand(R4, RRX));
-
- EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
-}
-
-TEST_F(Thumb2AssemblerTest, ShiftImmediate) {
- // Note: This test produces the same results as DataProcessingShiftedRegister
- // but it does so using shift functions instead of mov().
-
- // 16-bit variants.
- __ Lsl(R3, R4, 4);
- __ Lsr(R3, R4, 5);
- __ Asr(R3, R4, 6);
-
- // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
- __ Ror(R3, R4, 7);
-
- // 32-bit RRX because RRX has no 16-bit version.
- __ Rrx(R3, R4);
-
- // 32 bit variants (not setting condition codes).
- __ Lsl(R3, R4, 4, AL, kCcKeep);
- __ Lsr(R3, R4, 5, AL, kCcKeep);
- __ Asr(R3, R4, 6, AL, kCcKeep);
- __ Ror(R3, R4, 7, AL, kCcKeep);
- __ Rrx(R3, R4, AL, kCcKeep);
-
- // 32 bit variants (high registers).
- __ Lsls(R8, R4, 4);
- __ Lsrs(R8, R4, 5);
- __ Asrs(R8, R4, 6);
- __ Rors(R8, R4, 7);
- __ Rrxs(R8, R4);
-
- EmitAndCheck(&assembler, "ShiftImmediate");
-}
-
-TEST_F(Thumb2AssemblerTest, BasicLoad) {
- __ ldr(R3, Address(R4, 24));
- __ ldrb(R3, Address(R4, 24));
- __ ldrh(R3, Address(R4, 24));
- __ ldrsb(R3, Address(R4, 24));
- __ ldrsh(R3, Address(R4, 24));
-
- __ ldr(R3, Address(SP, 24));
-
- // 32 bit variants
- __ ldr(R8, Address(R4, 24));
- __ ldrb(R8, Address(R4, 24));
- __ ldrh(R8, Address(R4, 24));
- __ ldrsb(R8, Address(R4, 24));
- __ ldrsh(R8, Address(R4, 24));
-
- EmitAndCheck(&assembler, "BasicLoad");
-}
-
-
-TEST_F(Thumb2AssemblerTest, BasicStore) {
- __ str(R3, Address(R4, 24));
- __ strb(R3, Address(R4, 24));
- __ strh(R3, Address(R4, 24));
-
- __ str(R3, Address(SP, 24));
-
- // 32 bit variants.
- __ str(R8, Address(R4, 24));
- __ strb(R8, Address(R4, 24));
- __ strh(R8, Address(R4, 24));
-
- EmitAndCheck(&assembler, "BasicStore");
-}
-
-TEST_F(Thumb2AssemblerTest, ComplexLoad) {
- __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
- __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
- __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
- __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
- __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
- __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- EmitAndCheck(&assembler, "ComplexLoad");
-}
-
-
-TEST_F(Thumb2AssemblerTest, ComplexStore) {
- __ str(R3, Address(R4, 24, Address::Mode::Offset));
- __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- __ strb(R3, Address(R4, 24, Address::Mode::Offset));
- __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- __ strh(R3, Address(R4, 24, Address::Mode::Offset));
- __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
- __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
- __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
- __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
- __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
-
- EmitAndCheck(&assembler, "ComplexStore");
-}
-
-TEST_F(Thumb2AssemblerTest, NegativeLoadStore) {
- __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
- __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
- __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
- __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
- __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
- __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- __ str(R3, Address(R4, -24, Address::Mode::Offset));
- __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- __ strb(R3, Address(R4, -24, Address::Mode::Offset));
- __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- __ strh(R3, Address(R4, -24, Address::Mode::Offset));
- __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
- __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
- __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
- __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
- __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
-
- EmitAndCheck(&assembler, "NegativeLoadStore");
-}
-
-TEST_F(Thumb2AssemblerTest, SimpleLoadStoreDual) {
- __ strd(R2, Address(R0, 24, Address::Mode::Offset));
- __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
-
- EmitAndCheck(&assembler, "SimpleLoadStoreDual");
-}
-
-TEST_F(Thumb2AssemblerTest, ComplexLoadStoreDual) {
- __ strd(R2, Address(R0, 24, Address::Mode::Offset));
- __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
- __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
- __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
- __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
- __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
-
- __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
- __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
- __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
- __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
- __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
- __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
-
- EmitAndCheck(&assembler, "ComplexLoadStoreDual");
-}
-
-TEST_F(Thumb2AssemblerTest, NegativeLoadStoreDual) {
- __ strd(R2, Address(R0, -24, Address::Mode::Offset));
- __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
- __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
- __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
- __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
- __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
-
- __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
- __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
- __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
- __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
- __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
- __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
-
- EmitAndCheck(&assembler, "NegativeLoadStoreDual");
-}
-
-TEST_F(Thumb2AssemblerTest, SimpleBranch) {
- Label l1;
- __ mov(R0, ShifterOperand(2));
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(1));
- __ b(&l1);
- Label l2;
- __ b(&l2);
- __ mov(R1, ShifterOperand(2));
- __ Bind(&l2);
- __ mov(R0, ShifterOperand(3));
-
- Label l3;
- __ mov(R0, ShifterOperand(2));
- __ Bind(&l3);
- __ mov(R1, ShifterOperand(1));
- __ b(&l3, EQ);
-
- Label l4;
- __ b(&l4, EQ);
- __ mov(R1, ShifterOperand(2));
- __ Bind(&l4);
- __ mov(R0, ShifterOperand(3));
-
- // 2 linked labels.
- Label l5;
- __ b(&l5);
- __ mov(R1, ShifterOperand(4));
- __ b(&l5);
- __ mov(R1, ShifterOperand(5));
- __ Bind(&l5);
- __ mov(R0, ShifterOperand(6));
-
- EmitAndCheck(&assembler, "SimpleBranch");
-}
-
-TEST_F(Thumb2AssemblerTest, LongBranch) {
- __ Force32Bit();
- // 32 bit branches.
- Label l1;
- __ mov(R0, ShifterOperand(2));
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(1));
- __ b(&l1);
-
- Label l2;
- __ b(&l2);
- __ mov(R1, ShifterOperand(2));
- __ Bind(&l2);
- __ mov(R0, ShifterOperand(3));
-
- Label l3;
- __ mov(R0, ShifterOperand(2));
- __ Bind(&l3);
- __ mov(R1, ShifterOperand(1));
- __ b(&l3, EQ);
-
- Label l4;
- __ b(&l4, EQ);
- __ mov(R1, ShifterOperand(2));
- __ Bind(&l4);
- __ mov(R0, ShifterOperand(3));
-
- // 2 linked labels.
- Label l5;
- __ b(&l5);
- __ mov(R1, ShifterOperand(4));
- __ b(&l5);
- __ mov(R1, ShifterOperand(5));
- __ Bind(&l5);
- __ mov(R0, ShifterOperand(6));
-
- EmitAndCheck(&assembler, "LongBranch");
-}
-
-TEST_F(Thumb2AssemblerTest, LoadMultiple) {
- // 16 bit.
- __ ldm(DB_W, R4, (1 << R0 | 1 << R3));
-
- // 32 bit.
- __ ldm(DB_W, R4, (1 << LR | 1 << R11));
- __ ldm(DB, R4, (1 << LR | 1 << R11));
-
- // Single reg is converted to ldr
- __ ldm(DB_W, R4, (1 << R5));
-
- EmitAndCheck(&assembler, "LoadMultiple");
-}
-
-TEST_F(Thumb2AssemblerTest, StoreMultiple) {
- // 16 bit.
- __ stm(IA_W, R4, (1 << R0 | 1 << R3));
-
- // 32 bit.
- __ stm(IA_W, R4, (1 << LR | 1 << R11));
- __ stm(IA, R4, (1 << LR | 1 << R11));
-
- // Single reg is converted to str
- __ stm(IA_W, R4, (1 << R5));
- __ stm(IA, R4, (1 << R5));
-
- EmitAndCheck(&assembler, "StoreMultiple");
-}
-
-TEST_F(Thumb2AssemblerTest, MovWMovT) {
- // Always 32 bit.
- __ movw(R4, 0);
- __ movw(R4, 0x34);
- __ movw(R9, 0x34);
- __ movw(R3, 0x1234);
- __ movw(R9, 0xffff);
-
- // Always 32 bit.
- __ movt(R0, 0);
- __ movt(R0, 0x1234);
- __ movt(R1, 0xffff);
-
- EmitAndCheck(&assembler, "MovWMovT");
-}
-
-TEST_F(Thumb2AssemblerTest, SpecialAddSub) {
- __ add(R2, SP, ShifterOperand(0x50)); // 16 bit.
- __ add(SP, SP, ShifterOperand(0x50)); // 16 bit.
- __ add(R8, SP, ShifterOperand(0x50)); // 32 bit.
-
- __ add(R2, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
- __ add(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
- __ add(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
-
- __ sub(SP, SP, ShifterOperand(0x50)); // 16 bit
- __ sub(R0, SP, ShifterOperand(0x50)); // 32 bit
- __ sub(R8, SP, ShifterOperand(0x50)); // 32 bit.
-
- __ sub(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size
- __ sub(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
-
- EmitAndCheck(&assembler, "SpecialAddSub");
-}
-
-TEST_F(Thumb2AssemblerTest, LoadFromOffset) {
- __ LoadFromOffset(kLoadWord, R2, R4, 12);
- __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
- __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
- __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
- __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
- __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
- __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
- __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
- __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
- __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
- __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
- __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
- __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
- __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
- __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
- __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
- __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
- __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
-
- __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12.
- __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
-
- __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
- __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
- __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
-
- EmitAndCheck(&assembler, "LoadFromOffset");
-}
-
-TEST_F(Thumb2AssemblerTest, StoreToOffset) {
- __ StoreToOffset(kStoreWord, R2, R4, 12);
- __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
- __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
- __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
- __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
- __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
- __ StoreToOffset(kStoreHalfword, R2, R4, 12);
- __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
- __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
- __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
- __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
- __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
- __ StoreToOffset(kStoreWordPair, R2, R4, 12);
- __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
- __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
- __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
- __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
- __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
-
- __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12.
- __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
-
- __ StoreToOffset(kStoreByte, R2, R4, 12);
-
- EmitAndCheck(&assembler, "StoreToOffset");
-}
-
-TEST_F(Thumb2AssemblerTest, IfThen) {
- __ it(EQ);
- __ mov(R1, ShifterOperand(1), EQ);
-
- __ it(EQ, kItThen);
- __ mov(R1, ShifterOperand(1), EQ);
- __ mov(R2, ShifterOperand(2), EQ);
-
- __ it(EQ, kItElse);
- __ mov(R1, ShifterOperand(1), EQ);
- __ mov(R2, ShifterOperand(2), NE);
-
- __ it(EQ, kItThen, kItElse);
- __ mov(R1, ShifterOperand(1), EQ);
- __ mov(R2, ShifterOperand(2), EQ);
- __ mov(R3, ShifterOperand(3), NE);
-
- __ it(EQ, kItElse, kItElse);
- __ mov(R1, ShifterOperand(1), EQ);
- __ mov(R2, ShifterOperand(2), NE);
- __ mov(R3, ShifterOperand(3), NE);
-
- __ it(EQ, kItThen, kItThen, kItElse);
- __ mov(R1, ShifterOperand(1), EQ);
- __ mov(R2, ShifterOperand(2), EQ);
- __ mov(R3, ShifterOperand(3), EQ);
- __ mov(R4, ShifterOperand(4), NE);
-
- EmitAndCheck(&assembler, "IfThen");
-}
-
-TEST_F(Thumb2AssemblerTest, CbzCbnz) {
- Label l1;
- __ cbz(R2, &l1);
- __ mov(R1, ShifterOperand(3));
- __ mov(R2, ShifterOperand(3));
- __ Bind(&l1);
- __ mov(R2, ShifterOperand(4));
-
- Label l2;
- __ cbnz(R2, &l2);
- __ mov(R8, ShifterOperand(3));
- __ mov(R2, ShifterOperand(3));
- __ Bind(&l2);
- __ mov(R2, ShifterOperand(4));
-
- EmitAndCheck(&assembler, "CbzCbnz");
-}
-
-TEST_F(Thumb2AssemblerTest, Multiply) {
- __ mul(R0, R1, R0);
- __ mul(R0, R1, R2);
- __ mul(R8, R9, R8);
- __ mul(R8, R9, R10);
-
- __ mla(R0, R1, R2, R3);
- __ mla(R8, R9, R8, R9);
-
- __ mls(R0, R1, R2, R3);
- __ mls(R8, R9, R8, R9);
-
- __ umull(R0, R1, R2, R3);
- __ umull(R8, R9, R10, R11);
-
- EmitAndCheck(&assembler, "Multiply");
-}
-
-TEST_F(Thumb2AssemblerTest, Divide) {
- __ sdiv(R0, R1, R2);
- __ sdiv(R8, R9, R10);
-
- __ udiv(R0, R1, R2);
- __ udiv(R8, R9, R10);
-
- EmitAndCheck(&assembler, "Divide");
-}
-
-TEST_F(Thumb2AssemblerTest, VMov) {
- __ vmovs(S1, 1.0);
- __ vmovd(D1, 1.0);
-
- __ vmovs(S1, S2);
- __ vmovd(D1, D2);
-
- EmitAndCheck(&assembler, "VMov");
-}
-
-
-TEST_F(Thumb2AssemblerTest, BasicFloatingPoint) {
- __ vadds(S0, S1, S2);
- __ vsubs(S0, S1, S2);
- __ vmuls(S0, S1, S2);
- __ vmlas(S0, S1, S2);
- __ vmlss(S0, S1, S2);
- __ vdivs(S0, S1, S2);
- __ vabss(S0, S1);
- __ vnegs(S0, S1);
- __ vsqrts(S0, S1);
-
- __ vaddd(D0, D1, D2);
- __ vsubd(D0, D1, D2);
- __ vmuld(D0, D1, D2);
- __ vmlad(D0, D1, D2);
- __ vmlsd(D0, D1, D2);
- __ vdivd(D0, D1, D2);
- __ vabsd(D0, D1);
- __ vnegd(D0, D1);
- __ vsqrtd(D0, D1);
-
- EmitAndCheck(&assembler, "BasicFloatingPoint");
-}
-
-TEST_F(Thumb2AssemblerTest, FloatingPointConversions) {
- __ vcvtsd(S2, D2);
- __ vcvtds(D2, S2);
-
- __ vcvtis(S1, S2);
- __ vcvtsi(S1, S2);
-
- __ vcvtid(S1, D2);
- __ vcvtdi(D1, S2);
-
- __ vcvtus(S1, S2);
- __ vcvtsu(S1, S2);
-
- __ vcvtud(S1, D2);
- __ vcvtdu(D1, S2);
-
- EmitAndCheck(&assembler, "FloatingPointConversions");
-}
-
-TEST_F(Thumb2AssemblerTest, FloatingPointComparisons) {
- __ vcmps(S0, S1);
- __ vcmpd(D0, D1);
-
- __ vcmpsz(S2);
- __ vcmpdz(D2);
-
- EmitAndCheck(&assembler, "FloatingPointComparisons");
-}
-
-TEST_F(Thumb2AssemblerTest, Calls) {
- __ blx(LR);
- __ bx(LR);
-
- EmitAndCheck(&assembler, "Calls");
-}
-
-TEST_F(Thumb2AssemblerTest, Breakpoint) {
- __ bkpt(0);
-
- EmitAndCheck(&assembler, "Breakpoint");
-}
-
-TEST_F(Thumb2AssemblerTest, StrR1) {
- __ str(R1, Address(SP, 68));
- __ str(R1, Address(SP, 1068));
-
- EmitAndCheck(&assembler, "StrR1");
-}
-
-TEST_F(Thumb2AssemblerTest, VPushPop) {
- __ vpushs(S2, 4);
- __ vpushd(D2, 4);
-
- __ vpops(S2, 4);
- __ vpopd(D2, 4);
-
- EmitAndCheck(&assembler, "VPushPop");
-}
-
-TEST_F(Thumb2AssemblerTest, Max16BitBranch) {
- Label l1;
- __ b(&l1);
- for (int i = 0 ; i < (1 << 11) ; i += 2) {
- __ mov(R3, ShifterOperand(i & 0xff));
- }
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(R2));
-
- EmitAndCheck(&assembler, "Max16BitBranch");
-}
-
-TEST_F(Thumb2AssemblerTest, Branch32) {
- Label l1;
- __ b(&l1);
- for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
- __ mov(R3, ShifterOperand(i & 0xff));
- }
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(R2));
-
- EmitAndCheck(&assembler, "Branch32");
-}
-
-TEST_F(Thumb2AssemblerTest, CompareAndBranchMax) {
- Label l1;
- __ cbz(R4, &l1);
- for (int i = 0 ; i < (1 << 7) ; i += 2) {
- __ mov(R3, ShifterOperand(i & 0xff));
- }
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(R2));
-
- EmitAndCheck(&assembler, "CompareAndBranchMax");
-}
-
-TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
- Label l1;
- __ cbz(R4, &l1);
- for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) {
- __ mov(R3, ShifterOperand(i & 0xff));
- }
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(R2));
-
- EmitAndCheck(&assembler, "CompareAndBranchRelocation16");
-}
-
-TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
- Label l1;
- __ cbz(R4, &l1);
- for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
- __ mov(R3, ShifterOperand(i & 0xff));
- }
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(R2));
-
- EmitAndCheck(&assembler, "CompareAndBranchRelocation32");
-}
-
-TEST_F(Thumb2AssemblerTest, MixedBranch32) {
- Label l1;
- Label l2;
- __ b(&l1); // Forwards.
- __ Bind(&l2);
-
- // Space to force relocation.
- for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
- __ mov(R3, ShifterOperand(i & 0xff));
- }
- __ b(&l2); // Backwards.
- __ Bind(&l1);
- __ mov(R1, ShifterOperand(R2));
-
- EmitAndCheck(&assembler, "MixedBranch32");
-}
-
-TEST_F(Thumb2AssemblerTest, Shifts) {
- // 16 bit selected for CcDontCare.
- __ Lsl(R0, R1, 5);
- __ Lsr(R0, R1, 5);
- __ Asr(R0, R1, 5);
-
- __ Lsl(R0, R0, R1);
- __ Lsr(R0, R0, R1);
- __ Asr(R0, R0, R1);
- __ Ror(R0, R0, R1);
-
- // 16 bit with kCcSet.
- __ Lsls(R0, R1, 5);
- __ Lsrs(R0, R1, 5);
- __ Asrs(R0, R1, 5);
-
- __ Lsls(R0, R0, R1);
- __ Lsrs(R0, R0, R1);
- __ Asrs(R0, R0, R1);
- __ Rors(R0, R0, R1);
-
- // 32-bit with kCcKeep.
- __ Lsl(R0, R1, 5, AL, kCcKeep);
- __ Lsr(R0, R1, 5, AL, kCcKeep);
- __ Asr(R0, R1, 5, AL, kCcKeep);
-
- __ Lsl(R0, R0, R1, AL, kCcKeep);
- __ Lsr(R0, R0, R1, AL, kCcKeep);
- __ Asr(R0, R0, R1, AL, kCcKeep);
- __ Ror(R0, R0, R1, AL, kCcKeep);
-
- // 32-bit because ROR immediate doesn't have a 16-bit version like the other shifts.
- __ Ror(R0, R1, 5);
- __ Rors(R0, R1, 5);
- __ Ror(R0, R1, 5, AL, kCcKeep);
-
- // 32 bit due to high registers.
- __ Lsl(R8, R1, 5);
- __ Lsr(R0, R8, 5);
- __ Asr(R8, R1, 5);
- __ Ror(R0, R8, 5);
-
- // 32 bit due to different Rd and Rn.
- __ Lsl(R0, R1, R2);
- __ Lsr(R0, R1, R2);
- __ Asr(R0, R1, R2);
- __ Ror(R0, R1, R2);
-
- // 32 bit due to use of high registers.
- __ Lsl(R8, R1, R2);
- __ Lsr(R0, R8, R2);
- __ Asr(R0, R1, R8);
-
- // S bit (all 32 bit)
-
- // 32 bit due to high registers.
- __ Lsls(R8, R1, 5);
- __ Lsrs(R0, R8, 5);
- __ Asrs(R8, R1, 5);
- __ Rors(R0, R8, 5);
-
- // 32 bit due to different Rd and Rn.
- __ Lsls(R0, R1, R2);
- __ Lsrs(R0, R1, R2);
- __ Asrs(R0, R1, R2);
- __ Rors(R0, R1, R2);
-
- // 32 bit due to use of high registers.
- __ Lsls(R8, R1, R2);
- __ Lsrs(R0, R8, R2);
- __ Asrs(R0, R1, R8);
-
- EmitAndCheck(&assembler, "Shifts");
-}
-
-TEST_F(Thumb2AssemblerTest, LoadStoreRegOffset) {
- // 16 bit.
- __ ldr(R0, Address(R1, R2));
- __ str(R0, Address(R1, R2));
-
- // 32 bit due to shift.
- __ ldr(R0, Address(R1, R2, LSL, 1));
- __ str(R0, Address(R1, R2, LSL, 1));
-
- __ ldr(R0, Address(R1, R2, LSL, 3));
- __ str(R0, Address(R1, R2, LSL, 3));
-
- // 32 bit due to high register use.
- __ ldr(R8, Address(R1, R2));
- __ str(R8, Address(R1, R2));
-
- __ ldr(R1, Address(R8, R2));
- __ str(R2, Address(R8, R2));
-
- __ ldr(R0, Address(R1, R8));
- __ str(R0, Address(R1, R8));
-
- EmitAndCheck(&assembler, "LoadStoreRegOffset");
-}
-
-TEST_F(Thumb2AssemblerTest, LoadStoreLimits) {
- __ ldr(R0, Address(R4, 124)); // 16 bit.
- __ ldr(R0, Address(R4, 128)); // 32 bit.
-
- __ ldrb(R0, Address(R4, 31)); // 16 bit.
- __ ldrb(R0, Address(R4, 32)); // 32 bit.
-
- __ ldrh(R0, Address(R4, 62)); // 16 bit.
- __ ldrh(R0, Address(R4, 64)); // 32 bit.
-
- __ ldrsb(R0, Address(R4, 31)); // 32 bit.
- __ ldrsb(R0, Address(R4, 32)); // 32 bit.
-
- __ ldrsh(R0, Address(R4, 62)); // 32 bit.
- __ ldrsh(R0, Address(R4, 64)); // 32 bit.
-
- __ str(R0, Address(R4, 124)); // 16 bit.
- __ str(R0, Address(R4, 128)); // 32 bit.
-
- __ strb(R0, Address(R4, 31)); // 16 bit.
- __ strb(R0, Address(R4, 32)); // 32 bit.
-
- __ strh(R0, Address(R4, 62)); // 16 bit.
- __ strh(R0, Address(R4, 64)); // 32 bit.
-
- EmitAndCheck(&assembler, "LoadStoreLimits");
-}
-
-TEST_F(Thumb2AssemblerTest, CompareAndBranch) {
- Label label;
- __ CompareAndBranchIfZero(arm::R0, &label);
- __ CompareAndBranchIfZero(arm::R11, &label);
- __ CompareAndBranchIfNonZero(arm::R0, &label);
- __ CompareAndBranchIfNonZero(arm::R11, &label);
- __ Bind(&label);
-
- EmitAndCheck(&assembler, "CompareAndBranch");
-}
-
-TEST_F(Thumb2AssemblerTest, AddConstant) {
- // Low registers, Rd != Rn.
- __ AddConstant(R0, R1, 0); // MOV.
- __ AddConstant(R0, R1, 1); // 16-bit ADDS, encoding T1.
- __ AddConstant(R0, R1, 7); // 16-bit ADDS, encoding T1.
- __ AddConstant(R0, R1, 8); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 255); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 256); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 257); // 32-bit ADD, encoding T4.
- __ AddConstant(R0, R1, 0xfff); // 32-bit ADD, encoding T4.
- __ AddConstant(R0, R1, 0x1000); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 0x1001); // MVN+SUB.
- __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
- __ AddConstant(R0, R1, 0xffff); // MOVW+ADD.
- __ AddConstant(R0, R1, 0x10000); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 0x10001); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 0x10002); // MVN+SUB.
- __ AddConstant(R0, R1, 0x10003); // MOVW+MOVT+ADD.
- __ AddConstant(R0, R1, -1); // 16-bit SUBS.
- __ AddConstant(R0, R1, -7); // 16-bit SUBS.
- __ AddConstant(R0, R1, -8); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -255); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -256); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -257); // 32-bit SUB, encoding T4.
- __ AddConstant(R0, R1, -0xfff); // 32-bit SUB, encoding T4.
- __ AddConstant(R0, R1, -0x1000); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -0x1001); // MVN+ADD.
- __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
- __ AddConstant(R0, R1, -0xffff); // MOVW+SUB.
- __ AddConstant(R0, R1, -0x10000); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -0x10001); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -0x10002); // MVN+ADD.
- __ AddConstant(R0, R1, -0x10003); // MOVW+MOVT+ADD.
-
- // Low registers, Rd == Rn.
- __ AddConstant(R0, R0, 0); // Nothing.
- __ AddConstant(R1, R1, 1); // 16-bit ADDS, encoding T2,
- __ AddConstant(R0, R0, 7); // 16-bit ADDS, encoding T2.
- __ AddConstant(R1, R1, 8); // 16-bit ADDS, encoding T2.
- __ AddConstant(R0, R0, 255); // 16-bit ADDS, encoding T2.
- __ AddConstant(R1, R1, 256); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R0, 257); // 32-bit ADD, encoding T4.
- __ AddConstant(R1, R1, 0xfff); // 32-bit ADD, encoding T4.
- __ AddConstant(R0, R0, 0x1000); // 32-bit ADD, encoding T3.
- __ AddConstant(R1, R1, 0x1001); // MVN+SUB.
- __ AddConstant(R0, R0, 0x1002); // MOVW+ADD.
- __ AddConstant(R1, R1, 0xffff); // MOVW+ADD.
- __ AddConstant(R0, R0, 0x10000); // 32-bit ADD, encoding T3.
- __ AddConstant(R1, R1, 0x10001); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R0, 0x10002); // MVN+SUB.
- __ AddConstant(R1, R1, 0x10003); // MOVW+MOVT+ADD.
- __ AddConstant(R0, R0, -1); // 16-bit SUBS, encoding T2.
- __ AddConstant(R1, R1, -7); // 16-bit SUBS, encoding T2.
- __ AddConstant(R0, R0, -8); // 16-bit SUBS, encoding T2.
- __ AddConstant(R1, R1, -255); // 16-bit SUBS, encoding T2.
- __ AddConstant(R0, R0, -256); // 32-bit SUB, encoding T3.
- __ AddConstant(R1, R1, -257); // 32-bit SUB, encoding T4.
- __ AddConstant(R0, R0, -0xfff); // 32-bit SUB, encoding T4.
- __ AddConstant(R1, R1, -0x1000); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R0, -0x1001); // MVN+ADD.
- __ AddConstant(R1, R1, -0x1002); // MOVW+SUB.
- __ AddConstant(R0, R0, -0xffff); // MOVW+SUB.
- __ AddConstant(R1, R1, -0x10000); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R0, -0x10001); // 32-bit SUB, encoding T3.
- __ AddConstant(R1, R1, -0x10002); // MVN+ADD.
- __ AddConstant(R0, R0, -0x10003); // MOVW+MOVT+ADD.
-
- // High registers.
- __ AddConstant(R8, R8, 0); // Nothing.
- __ AddConstant(R8, R1, 1); // 32-bit ADD, encoding T3,
- __ AddConstant(R0, R8, 7); // 32-bit ADD, encoding T3.
- __ AddConstant(R8, R8, 8); // 32-bit ADD, encoding T3.
- __ AddConstant(R8, R1, 255); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R8, 256); // 32-bit ADD, encoding T3.
- __ AddConstant(R8, R8, 257); // 32-bit ADD, encoding T4.
- __ AddConstant(R8, R1, 0xfff); // 32-bit ADD, encoding T4.
- __ AddConstant(R0, R8, 0x1000); // 32-bit ADD, encoding T3.
- __ AddConstant(R8, R8, 0x1001); // MVN+SUB.
- __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
- __ AddConstant(R0, R8, 0xffff); // MOVW+ADD.
- __ AddConstant(R8, R8, 0x10000); // 32-bit ADD, encoding T3.
- __ AddConstant(R8, R1, 0x10001); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R8, 0x10002); // MVN+SUB.
- __ AddConstant(R0, R8, 0x10003); // MOVW+MOVT+ADD.
- __ AddConstant(R8, R8, -1); // 32-bit ADD, encoding T3.
- __ AddConstant(R8, R1, -7); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R8, -8); // 32-bit SUB, encoding T3.
- __ AddConstant(R8, R8, -255); // 32-bit SUB, encoding T3.
- __ AddConstant(R8, R1, -256); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R8, -257); // 32-bit SUB, encoding T4.
- __ AddConstant(R8, R8, -0xfff); // 32-bit SUB, encoding T4.
- __ AddConstant(R8, R1, -0x1000); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R8, -0x1001); // MVN+ADD.
- __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
- __ AddConstant(R8, R1, -0xffff); // MOVW+SUB.
- __ AddConstant(R0, R8, -0x10000); // 32-bit SUB, encoding T3.
- __ AddConstant(R8, R8, -0x10001); // 32-bit SUB, encoding T3.
- __ AddConstant(R8, R1, -0x10002); // MVN+SUB.
- __ AddConstant(R0, R8, -0x10003); // MOVW+MOVT+ADD.
-
- // Low registers, Rd != Rn, kCcKeep.
- __ AddConstant(R0, R1, 0, AL, kCcKeep); // MOV.
- __ AddConstant(R0, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
- __ AddConstant(R0, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
- __ AddConstant(R0, R1, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
- __ AddConstant(R0, R1, 0x1002, AL, kCcKeep); // MOVW+ADD.
- __ AddConstant(R0, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
- __ AddConstant(R0, R1, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, 0x10002, AL, kCcKeep); // MVN+SUB.
- __ AddConstant(R0, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
- __ AddConstant(R0, R1, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
- __ AddConstant(R0, R1, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
- __ AddConstant(R0, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -0x1001, AL, kCcKeep); // MVN+ADD.
- __ AddConstant(R0, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
- __ AddConstant(R0, R1, -0xffff, AL, kCcKeep); // MOVW+SUB.
- __ AddConstant(R0, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
- __ AddConstant(R0, R1, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
-
- // Low registers, Rd == Rn, kCcKeep.
- __ AddConstant(R0, R0, 0, AL, kCcKeep); // Nothing.
- __ AddConstant(R1, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R0, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R1, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R0, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R1, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R0, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
- __ AddConstant(R1, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
- __ AddConstant(R0, R0, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R1, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
- __ AddConstant(R0, R0, 0x1002, AL, kCcKeep); // MOVW+ADD.
- __ AddConstant(R1, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
- __ AddConstant(R0, R0, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R1, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R0, R0, 0x10002, AL, kCcKeep); // MVN+SUB.
- __ AddConstant(R1, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
- __ AddConstant(R0, R0, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
- __ AddConstant(R1, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R0, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R1, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R0, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R1, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
- __ AddConstant(R0, R0, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
- __ AddConstant(R1, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R0, -0x1001, AL, kCcKeep); // MVN+ADD.
- __ AddConstant(R1, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
- __ AddConstant(R0, R0, -0xffff, AL, kCcKeep); // MOVW+SUB.
- __ AddConstant(R1, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R0, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
- __ AddConstant(R1, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
- __ AddConstant(R0, R0, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
-
- // Low registers, Rd != Rn, kCcSet.
- __ AddConstant(R0, R1, 0, AL, kCcSet); // 16-bit ADDS.
- __ AddConstant(R0, R1, 1, AL, kCcSet); // 16-bit ADDS.
- __ AddConstant(R0, R1, 7, AL, kCcSet); // 16-bit ADDS.
- __ AddConstant(R0, R1, 8, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R1, 255, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R1, 256, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R1, 257, AL, kCcSet); // MVN+SUBS.
- __ AddConstant(R0, R1, 0xfff, AL, kCcSet); // MOVW+ADDS.
- __ AddConstant(R0, R1, 0x1000, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R1, 0x1001, AL, kCcSet); // MVN+SUBS.
- __ AddConstant(R0, R1, 0x1002, AL, kCcSet); // MOVW+ADDS.
- __ AddConstant(R0, R1, 0xffff, AL, kCcSet); // MOVW+ADDS.
- __ AddConstant(R0, R1, 0x10000, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R1, 0x10001, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R1, 0x10002, AL, kCcSet); // MVN+SUBS.
- __ AddConstant(R0, R1, 0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
- __ AddConstant(R0, R1, -1, AL, kCcSet); // 16-bit SUBS.
- __ AddConstant(R0, R1, -7, AL, kCcSet); // 16-bit SUBS.
- __ AddConstant(R0, R1, -8, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R0, R1, -255, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R0, R1, -256, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R0, R1, -257, AL, kCcSet); // MVN+ADDS.
- __ AddConstant(R0, R1, -0xfff, AL, kCcSet); // MOVW+SUBS.
- __ AddConstant(R0, R1, -0x1000, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R0, R1, -0x1001, AL, kCcSet); // MVN+ADDS.
- __ AddConstant(R0, R1, -0x1002, AL, kCcSet); // MOVW+SUBS.
- __ AddConstant(R0, R1, -0xffff, AL, kCcSet); // MOVW+SUBS.
- __ AddConstant(R0, R1, -0x10000, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R0, R1, -0x10001, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R0, R1, -0x10002, AL, kCcSet); // MVN+ADDS.
- __ AddConstant(R0, R1, -0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
-
- // Low registers, Rd == Rn, kCcSet.
- __ AddConstant(R0, R0, 0, AL, kCcSet); // 16-bit ADDS, encoding T2.
- __ AddConstant(R1, R1, 1, AL, kCcSet); // 16-bit ADDS, encoding T2.
- __ AddConstant(R0, R0, 7, AL, kCcSet); // 16-bit ADDS, encoding T2.
- __ AddConstant(R1, R1, 8, AL, kCcSet); // 16-bit ADDS, encoding T2.
- __ AddConstant(R0, R0, 255, AL, kCcSet); // 16-bit ADDS, encoding T2.
- __ AddConstant(R1, R1, 256, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R0, 257, AL, kCcSet); // MVN+SUBS.
- __ AddConstant(R1, R1, 0xfff, AL, kCcSet); // MOVW+ADDS.
- __ AddConstant(R0, R0, 0x1000, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R1, R1, 0x1001, AL, kCcSet); // MVN+SUBS.
- __ AddConstant(R0, R0, 0x1002, AL, kCcSet); // MOVW+ADDS.
- __ AddConstant(R1, R1, 0xffff, AL, kCcSet); // MOVW+ADDS.
- __ AddConstant(R0, R0, 0x10000, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R1, R1, 0x10001, AL, kCcSet); // 32-bit ADDS, encoding T3.
- __ AddConstant(R0, R0, 0x10002, AL, kCcSet); // MVN+SUBS.
- __ AddConstant(R1, R1, 0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
- __ AddConstant(R0, R0, -1, AL, kCcSet); // 16-bit SUBS, encoding T2.
- __ AddConstant(R1, R1, -7, AL, kCcSet); // 16-bit SUBS, encoding T2.
- __ AddConstant(R0, R0, -8, AL, kCcSet); // 16-bit SUBS, encoding T2.
- __ AddConstant(R1, R1, -255, AL, kCcSet); // 16-bit SUBS, encoding T2.
- __ AddConstant(R0, R0, -256, AL, kCcSet); // 32-bit SUB, encoding T3.
- __ AddConstant(R1, R1, -257, AL, kCcSet); // MNV+ADDS.
- __ AddConstant(R0, R0, -0xfff, AL, kCcSet); // MOVW+SUBS.
- __ AddConstant(R1, R1, -0x1000, AL, kCcSet); // 32-bit SUB, encoding T3.
- __ AddConstant(R0, R0, -0x1001, AL, kCcSet); // MVN+ADDS.
- __ AddConstant(R1, R1, -0x1002, AL, kCcSet); // MOVW+SUBS.
- __ AddConstant(R0, R0, -0xffff, AL, kCcSet); // MOVW+SUBS.
- __ AddConstant(R1, R1, -0x10000, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R0, R0, -0x10001, AL, kCcSet); // 32-bit SUBS, encoding T3.
- __ AddConstant(R1, R1, -0x10002, AL, kCcSet); // MVN+ADDS.
- __ AddConstant(R0, R0, -0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
-
- __ it(EQ);
- __ AddConstant(R0, R1, 1, EQ, kCcSet); // 32-bit ADDS, encoding T3.
- __ it(NE);
- __ AddConstant(R0, R1, 1, NE, kCcKeep); // 16-bit ADDS, encoding T1.
- __ it(GE);
- __ AddConstant(R0, R0, 1, GE, kCcSet); // 32-bit ADDS, encoding T3.
- __ it(LE);
- __ AddConstant(R0, R0, 1, LE, kCcKeep); // 16-bit ADDS, encoding T2.
-
- EmitAndCheck(&assembler, "AddConstant");
-}
-
-TEST_F(Thumb2AssemblerTest, CmpConstant) {
- __ CmpConstant(R0, 0); // 16-bit CMP.
- __ CmpConstant(R1, 1); // 16-bit CMP.
- __ CmpConstant(R0, 7); // 16-bit CMP.
- __ CmpConstant(R1, 8); // 16-bit CMP.
- __ CmpConstant(R0, 255); // 16-bit CMP.
- __ CmpConstant(R1, 256); // 32-bit CMP.
- __ CmpConstant(R0, 257); // MNV+CMN.
- __ CmpConstant(R1, 0xfff); // MOVW+CMP.
- __ CmpConstant(R0, 0x1000); // 32-bit CMP.
- __ CmpConstant(R1, 0x1001); // MNV+CMN.
- __ CmpConstant(R0, 0x1002); // MOVW+CMP.
- __ CmpConstant(R1, 0xffff); // MOVW+CMP.
- __ CmpConstant(R0, 0x10000); // 32-bit CMP.
- __ CmpConstant(R1, 0x10001); // 32-bit CMP.
- __ CmpConstant(R0, 0x10002); // MVN+CMN.
- __ CmpConstant(R1, 0x10003); // MOVW+MOVT+CMP.
- __ CmpConstant(R0, -1); // 32-bit CMP.
- __ CmpConstant(R1, -7); // CMN.
- __ CmpConstant(R0, -8); // CMN.
- __ CmpConstant(R1, -255); // CMN.
- __ CmpConstant(R0, -256); // CMN.
- __ CmpConstant(R1, -257); // MNV+CMP.
- __ CmpConstant(R0, -0xfff); // MOVW+CMN.
- __ CmpConstant(R1, -0x1000); // CMN.
- __ CmpConstant(R0, -0x1001); // MNV+CMP.
- __ CmpConstant(R1, -0x1002); // MOVW+CMN.
- __ CmpConstant(R0, -0xffff); // MOVW+CMN.
- __ CmpConstant(R1, -0x10000); // CMN.
- __ CmpConstant(R0, -0x10001); // CMN.
- __ CmpConstant(R1, -0x10002); // MVN+CMP.
- __ CmpConstant(R0, -0x10003); // MOVW+MOVT+CMP.
-
- __ CmpConstant(R8, 0); // 32-bit CMP.
- __ CmpConstant(R9, 1); // 32-bit CMP.
- __ CmpConstant(R8, 7); // 32-bit CMP.
- __ CmpConstant(R9, 8); // 32-bit CMP.
- __ CmpConstant(R8, 255); // 32-bit CMP.
- __ CmpConstant(R9, 256); // 32-bit CMP.
- __ CmpConstant(R8, 257); // MNV+CMN
- __ CmpConstant(R9, 0xfff); // MOVW+CMP.
- __ CmpConstant(R8, 0x1000); // 32-bit CMP.
- __ CmpConstant(R9, 0x1001); // MVN+CMN.
- __ CmpConstant(R8, 0x1002); // MOVW+CMP.
- __ CmpConstant(R9, 0xffff); // MOVW+CMP.
- __ CmpConstant(R8, 0x10000); // 32-bit CMP.
- __ CmpConstant(R9, 0x10001); // 32-bit CMP.
- __ CmpConstant(R8, 0x10002); // MVN+CMN.
- __ CmpConstant(R9, 0x10003); // MOVW+MOVT+CMP.
- __ CmpConstant(R8, -1); // 32-bit CMP
- __ CmpConstant(R9, -7); // CMN.
- __ CmpConstant(R8, -8); // CMN.
- __ CmpConstant(R9, -255); // CMN.
- __ CmpConstant(R8, -256); // CMN.
- __ CmpConstant(R9, -257); // MNV+CMP.
- __ CmpConstant(R8, -0xfff); // MOVW+CMN.
- __ CmpConstant(R9, -0x1000); // CMN.
- __ CmpConstant(R8, -0x1001); // MVN+CMP.
- __ CmpConstant(R9, -0x1002); // MOVW+CMN.
- __ CmpConstant(R8, -0xffff); // MOVW+CMN.
- __ CmpConstant(R9, -0x10000); // CMN.
- __ CmpConstant(R8, -0x10001); // CMN.
- __ CmpConstant(R9, -0x10002); // MVN+CMP.
- __ CmpConstant(R8, -0x10003); // MOVW+MOVT+CMP.
-
- EmitAndCheck(&assembler, "CmpConstant");
-}
-
-#define ENABLE_VIXL_TEST
-
-#ifdef ENABLE_VIXL_TEST
-
-#define ARM_VIXL
-
-#ifdef ARM_VIXL
-typedef arm::ArmVIXLJNIMacroAssembler JniAssemblerType;
-#else
-typedef arm::Thumb2Assembler AssemblerType;
-#endif
-
class ArmVIXLAssemblerTest : public ::testing::Test {
public:
ArmVIXLAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
ArenaPool pool;
ArenaAllocator arena;
- JniAssemblerType assembler;
+ ArmVIXLJNIMacroAssembler assembler;
};
-#undef __
#define __ assembler->
-void EmitAndCheck(JniAssemblerType* assembler, const char* testname,
+void EmitAndCheck(ArmVIXLJNIMacroAssembler* assembler, const char* testname,
const char* const* results) {
__ FinalizeCode();
size_t cs = __ CodeSize();
@@ -1631,7 +197,7 @@
DumpAndCheck(managed_code, testname, results);
}
-void EmitAndCheck(JniAssemblerType* assembler, const char* testname) {
+void EmitAndCheck(ArmVIXLJNIMacroAssembler* assembler, const char* testname) {
InitResults();
std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
ASSERT_NE(results, test_results.end());
@@ -1640,6 +206,7 @@
}
#undef __
+
#define __ assembler.
TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
@@ -1733,14 +300,15 @@
EmitAndCheck(&assembler, "VixlJniHelpers");
}
-#ifdef ARM_VIXL
+#undef __
+
+// TODO: Avoid these macros.
#define R0 vixl::aarch32::r0
#define R2 vixl::aarch32::r2
#define R4 vixl::aarch32::r4
#define R12 vixl::aarch32::r12
-#undef __
+
#define __ assembler.asm_.
-#endif
TEST_F(ArmVIXLAssemblerTest, VixlLoadFromOffset) {
__ LoadFromOffset(kLoadWord, R2, R4, 12);
@@ -1807,6 +375,5 @@
}
#undef __
-#endif // ENABLE_VIXL_TEST
} // namespace arm
} // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 563d135..0a09435 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -1,5462 +1,3 @@
-const char* const SimpleMovResults[] = {
- " 0: 0008 movs r0, r1\n",
- " 2: 4608 mov r0, r1\n",
- " 4: 46c8 mov r8, r9\n",
- " 6: 2001 movs r0, #1\n",
- " 8: f04f 0809 mov.w r8, #9\n",
- nullptr
-};
-const char* const SimpleMov32Results[] = {
- " 0: ea4f 0001 mov.w r0, r1\n",
- " 4: ea4f 0809 mov.w r8, r9\n",
- nullptr
-};
-const char* const SimpleMovAddResults[] = {
- " 0: 4608 mov r0, r1\n",
- " 2: 1888 adds r0, r1, r2\n",
- " 4: 1c08 adds r0, r1, #0\n",
- nullptr
-};
-const char* const DataProcessingRegisterResults[] = {
- " 0: ea6f 0001 mvn.w r0, r1\n",
- " 4: eb01 0002 add.w r0, r1, r2\n",
- " 8: eba1 0002 sub.w r0, r1, r2\n",
- " c: ea01 0002 and.w r0, r1, r2\n",
- " 10: ea41 0002 orr.w r0, r1, r2\n",
- " 14: ea61 0002 orn r0, r1, r2\n",
- " 18: ea81 0002 eor.w r0, r1, r2\n",
- " 1c: ea21 0002 bic.w r0, r1, r2\n",
- " 20: eb41 0002 adc.w r0, r1, r2\n",
- " 24: eb61 0002 sbc.w r0, r1, r2\n",
- " 28: ebc1 0002 rsb r0, r1, r2\n",
- " 2c: ea90 0f01 teq r0, r1\n",
- " 30: 0008 movs r0, r1\n",
- " 32: 4608 mov r0, r1\n",
- " 34: 43c8 mvns r0, r1\n",
- " 36: 4408 add r0, r1\n",
- " 38: 1888 adds r0, r1, r2\n",
- " 3a: 1a88 subs r0, r1, r2\n",
- " 3c: 4148 adcs r0, r1\n",
- " 3e: 4188 sbcs r0, r1\n",
- " 40: 4008 ands r0, r1\n",
- " 42: 4308 orrs r0, r1\n",
- " 44: 4048 eors r0, r1\n",
- " 46: 4388 bics r0, r1\n",
- " 48: 4208 tst r0, r1\n",
- " 4a: 4288 cmp r0, r1\n",
- " 4c: 42c8 cmn r0, r1\n",
- " 4e: 4641 mov r1, r8\n",
- " 50: 4681 mov r9, r0\n",
- " 52: 46c8 mov r8, r9\n",
- " 54: 4441 add r1, r8\n",
- " 56: 4481 add r9, r0\n",
- " 58: 44c8 add r8, r9\n",
- " 5a: 4548 cmp r0, r9\n",
- " 5c: 4588 cmp r8, r1\n",
- " 5e: 45c1 cmp r9, r8\n",
- " 60: 4248 negs r0, r1\n",
- " 62: 4240 negs r0, r0\n",
- " 64: ea5f 0008 movs.w r0, r8\n",
- " 68: ea7f 0008 mvns.w r0, r8\n",
- " 6c: eb01 0008 add.w r0, r1, r8\n",
- " 70: eb11 0008 adds.w r0, r1, r8\n",
- " 74: ebb1 0008 subs.w r0, r1, r8\n",
- " 78: eb50 0008 adcs.w r0, r0, r8\n",
- " 7c: eb70 0008 sbcs.w r0, r0, r8\n",
- " 80: ea10 0008 ands.w r0, r0, r8\n",
- " 84: ea50 0008 orrs.w r0, r0, r8\n",
- " 88: ea90 0008 eors.w r0, r0, r8\n",
- " 8c: ea30 0008 bics.w r0, r0, r8\n",
- " 90: ea10 0f08 tst.w r0, r8\n",
- " 94: eb10 0f08 cmn.w r0, r8\n",
- " 98: f1d8 0000 rsbs r0, r8, #0\n",
- " 9c: f1d8 0800 rsbs r8, r8, #0\n",
- " a0: bf08 it eq\n",
- " a2: ea7f 0001 mvnseq.w r0, r1\n",
- " a6: bf08 it eq\n",
- " a8: eb11 0002 addseq.w r0, r1, r2\n",
- " ac: bf08 it eq\n",
- " ae: ebb1 0002 subseq.w r0, r1, r2\n",
- " b2: bf08 it eq\n",
- " b4: eb50 0001 adcseq.w r0, r0, r1\n",
- " b8: bf08 it eq\n",
- " ba: eb70 0001 sbcseq.w r0, r0, r1\n",
- " be: bf08 it eq\n",
- " c0: ea10 0001 andseq.w r0, r0, r1\n",
- " c4: bf08 it eq\n",
- " c6: ea50 0001 orrseq.w r0, r0, r1\n",
- " ca: bf08 it eq\n",
- " cc: ea90 0001 eorseq.w r0, r0, r1\n",
- " d0: bf08 it eq\n",
- " d2: ea30 0001 bicseq.w r0, r0, r1\n",
- " d6: bf08 it eq\n",
- " d8: 43c8 mvneq r0, r1\n",
- " da: bf08 it eq\n",
- " dc: 1888 addeq r0, r1, r2\n",
- " de: bf08 it eq\n",
- " e0: 1a88 subeq r0, r1, r2\n",
- " e2: bf08 it eq\n",
- " e4: 4148 adceq r0, r1\n",
- " e6: bf08 it eq\n",
- " e8: 4188 sbceq r0, r1\n",
- " ea: bf08 it eq\n",
- " ec: 4008 andeq r0, r1\n",
- " ee: bf08 it eq\n",
- " f0: 4308 orreq r0, r1\n",
- " f2: bf08 it eq\n",
- " f4: 4048 eoreq r0, r1\n",
- " f6: bf08 it eq\n",
- " f8: 4388 biceq r0, r1\n",
- " fa: 4608 mov r0, r1\n",
- " fc: 43c8 mvns r0, r1\n",
- " fe: 4408 add r0, r1\n",
- " 100: 1888 adds r0, r1, r2\n",
- " 102: 1a88 subs r0, r1, r2\n",
- " 104: 4148 adcs r0, r1\n",
- " 106: 4188 sbcs r0, r1\n",
- " 108: 4008 ands r0, r1\n",
- " 10a: 4308 orrs r0, r1\n",
- " 10c: 4048 eors r0, r1\n",
- " 10e: 4388 bics r0, r1\n",
- " 110: 4641 mov r1, r8\n",
- " 112: 4681 mov r9, r0\n",
- " 114: 46c8 mov r8, r9\n",
- " 116: 4441 add r1, r8\n",
- " 118: 4481 add r9, r0\n",
- " 11a: 44c8 add r8, r9\n",
- " 11c: 4248 negs r0, r1\n",
- " 11e: 4240 negs r0, r0\n",
- " 120: eb01 0c00 add.w ip, r1, r0\n",
- nullptr
-};
-const char* const DataProcessingImmediateResults[] = {
- " 0: 2055 movs r0, #85 ; 0x55\n",
- " 2: f06f 0055 mvn.w r0, #85 ; 0x55\n",
- " 6: f101 0055 add.w r0, r1, #85 ; 0x55\n",
- " a: f1a1 0055 sub.w r0, r1, #85 ; 0x55\n",
- " e: f001 0055 and.w r0, r1, #85 ; 0x55\n",
- " 12: f041 0055 orr.w r0, r1, #85 ; 0x55\n",
- " 16: f061 0055 orn r0, r1, #85 ; 0x55\n",
- " 1a: f081 0055 eor.w r0, r1, #85 ; 0x55\n",
- " 1e: f021 0055 bic.w r0, r1, #85 ; 0x55\n",
- " 22: f141 0055 adc.w r0, r1, #85 ; 0x55\n",
- " 26: f161 0055 sbc.w r0, r1, #85 ; 0x55\n",
- " 2a: f1c1 0055 rsb r0, r1, #85 ; 0x55\n",
- " 2e: f010 0f55 tst.w r0, #85 ; 0x55\n",
- " 32: f090 0f55 teq r0, #85 ; 0x55\n",
- " 36: 2855 cmp r0, #85 ; 0x55\n",
- " 38: f110 0f55 cmn.w r0, #85 ; 0x55\n",
- " 3c: 1d48 adds r0, r1, #5\n",
- " 3e: 1f48 subs r0, r1, #5\n",
- " 40: 2055 movs r0, #85 ; 0x55\n",
- " 42: f07f 0055 mvns.w r0, #85 ; 0x55\n",
- " 46: 1d48 adds r0, r1, #5\n",
- " 48: 1f48 subs r0, r1, #5\n",
- nullptr
-};
-const char* const DataProcessingModifiedImmediateResults[] = {
- " 0: f04f 1055 mov.w r0, #5570645 ; 0x550055\n",
- " 4: f06f 1055 mvn.w r0, #5570645 ; 0x550055\n",
- " 8: f101 1055 add.w r0, r1, #5570645 ; 0x550055\n",
- " c: f1a1 1055 sub.w r0, r1, #5570645 ; 0x550055\n",
- " 10: f001 1055 and.w r0, r1, #5570645 ; 0x550055\n",
- " 14: f041 1055 orr.w r0, r1, #5570645 ; 0x550055\n",
- " 18: f061 1055 orn r0, r1, #5570645 ; 0x550055\n",
- " 1c: f081 1055 eor.w r0, r1, #5570645 ; 0x550055\n",
- " 20: f021 1055 bic.w r0, r1, #5570645 ; 0x550055\n",
- " 24: f141 1055 adc.w r0, r1, #5570645 ; 0x550055\n",
- " 28: f161 1055 sbc.w r0, r1, #5570645 ; 0x550055\n",
- " 2c: f1c1 1055 rsb r0, r1, #5570645 ; 0x550055\n",
- " 30: f010 1f55 tst.w r0, #5570645 ; 0x550055\n",
- " 34: f090 1f55 teq r0, #5570645 ; 0x550055\n",
- " 38: f1b0 1f55 cmp.w r0, #5570645 ; 0x550055\n",
- " 3c: f110 1f55 cmn.w r0, #5570645 ; 0x550055\n",
- nullptr
-};
-const char* const DataProcessingModifiedImmediatesResults[] = {
- " 0: f04f 1055 mov.w r0, #5570645 ; 0x550055\n",
- " 4: f04f 2055 mov.w r0, #1426085120 ; 0x55005500\n",
- " 8: f04f 3055 mov.w r0, #1431655765 ; 0x55555555\n",
- " c: f04f 4055 mov.w r0, #3573547008 ; 0xd5000000\n",
- " 10: f04f 40d4 mov.w r0, #1778384896 ; 0x6a000000\n",
- " 14: f44f 7054 mov.w r0, #848 ; 0x350\n",
- " 18: f44f 70d4 mov.w r0, #424 ; 0x1a8\n",
- nullptr
-};
-const char* const DataProcessingShiftedRegisterResults[] = {
- " 0: 0123 lsls r3, r4, #4\n",
- " 2: 0963 lsrs r3, r4, #5\n",
- " 4: 11a3 asrs r3, r4, #6\n",
- " 6: ea5f 13f4 movs.w r3, r4, ror #7\n",
- " a: ea5f 0334 movs.w r3, r4, rrx\n",
- " e: ea4f 1304 mov.w r3, r4, lsl #4\n",
- " 12: ea4f 1354 mov.w r3, r4, lsr #5\n",
- " 16: ea4f 13a4 mov.w r3, r4, asr #6\n",
- " 1a: ea4f 13f4 mov.w r3, r4, ror #7\n",
- " 1e: ea4f 0334 mov.w r3, r4, rrx\n",
- " 22: ea5f 1804 movs.w r8, r4, lsl #4\n",
- " 26: ea5f 1854 movs.w r8, r4, lsr #5\n",
- " 2a: ea5f 18a4 movs.w r8, r4, asr #6\n",
- " 2e: ea5f 18f4 movs.w r8, r4, ror #7\n",
- " 32: ea5f 0834 movs.w r8, r4, rrx\n",
- nullptr
-};
-const char* const ShiftImmediateResults[] = {
- " 0: 0123 lsls r3, r4, #4\n",
- " 2: 0963 lsrs r3, r4, #5\n",
- " 4: 11a3 asrs r3, r4, #6\n",
- " 6: ea4f 13f4 mov.w r3, r4, ror #7\n",
- " a: ea4f 0334 mov.w r3, r4, rrx\n",
- " e: ea4f 1304 mov.w r3, r4, lsl #4\n",
- " 12: ea4f 1354 mov.w r3, r4, lsr #5\n",
- " 16: ea4f 13a4 mov.w r3, r4, asr #6\n",
- " 1a: ea4f 13f4 mov.w r3, r4, ror #7\n",
- " 1e: ea4f 0334 mov.w r3, r4, rrx\n",
- " 22: ea5f 1804 movs.w r8, r4, lsl #4\n",
- " 26: ea5f 1854 movs.w r8, r4, lsr #5\n",
- " 2a: ea5f 18a4 movs.w r8, r4, asr #6\n",
- " 2e: ea5f 18f4 movs.w r8, r4, ror #7\n",
- " 32: ea5f 0834 movs.w r8, r4, rrx\n",
- nullptr
-};
-const char* const BasicLoadResults[] = {
- " 0: 69a3 ldr r3, [r4, #24]\n",
- " 2: 7e23 ldrb r3, [r4, #24]\n",
- " 4: 8b23 ldrh r3, [r4, #24]\n",
- " 6: f994 3018 ldrsb.w r3, [r4, #24]\n",
- " a: f9b4 3018 ldrsh.w r3, [r4, #24]\n",
- " e: 9b06 ldr r3, [sp, #24]\n",
- " 10: f8d4 8018 ldr.w r8, [r4, #24]\n",
- " 14: f894 8018 ldrb.w r8, [r4, #24]\n",
- " 18: f8b4 8018 ldrh.w r8, [r4, #24]\n",
- " 1c: f994 8018 ldrsb.w r8, [r4, #24]\n",
- " 20: f9b4 8018 ldrsh.w r8, [r4, #24]\n",
- nullptr
-};
-const char* const BasicStoreResults[] = {
- " 0: 61a3 str r3, [r4, #24]\n",
- " 2: 7623 strb r3, [r4, #24]\n",
- " 4: 8323 strh r3, [r4, #24]\n",
- " 6: 9306 str r3, [sp, #24]\n",
- " 8: f8c4 8018 str.w r8, [r4, #24]\n",
- " c: f884 8018 strb.w r8, [r4, #24]\n",
- " 10: f8a4 8018 strh.w r8, [r4, #24]\n",
- nullptr
-};
-const char* const ComplexLoadResults[] = {
- " 0: 69a3 ldr r3, [r4, #24]\n",
- " 2: f854 3f18 ldr.w r3, [r4, #24]!\n",
- " 6: f854 3b18 ldr.w r3, [r4], #24\n",
- " a: f854 3c18 ldr.w r3, [r4, #-24]\n",
- " e: f854 3d18 ldr.w r3, [r4, #-24]!\n",
- " 12: f854 3918 ldr.w r3, [r4], #-24\n",
- " 16: 7e23 ldrb r3, [r4, #24]\n",
- " 18: f814 3f18 ldrb.w r3, [r4, #24]!\n",
- " 1c: f814 3b18 ldrb.w r3, [r4], #24\n",
- " 20: f814 3c18 ldrb.w r3, [r4, #-24]\n",
- " 24: f814 3d18 ldrb.w r3, [r4, #-24]!\n",
- " 28: f814 3918 ldrb.w r3, [r4], #-24\n",
- " 2c: 8b23 ldrh r3, [r4, #24]\n",
- " 2e: f834 3f18 ldrh.w r3, [r4, #24]!\n",
- " 32: f834 3b18 ldrh.w r3, [r4], #24\n",
- " 36: f834 3c18 ldrh.w r3, [r4, #-24]\n",
- " 3a: f834 3d18 ldrh.w r3, [r4, #-24]!\n",
- " 3e: f834 3918 ldrh.w r3, [r4], #-24\n",
- " 42: f994 3018 ldrsb.w r3, [r4, #24]\n",
- " 46: f914 3f18 ldrsb.w r3, [r4, #24]!\n",
- " 4a: f914 3b18 ldrsb.w r3, [r4], #24\n",
- " 4e: f914 3c18 ldrsb.w r3, [r4, #-24]\n",
- " 52: f914 3d18 ldrsb.w r3, [r4, #-24]!\n",
- " 56: f914 3918 ldrsb.w r3, [r4], #-24\n",
- " 5a: f9b4 3018 ldrsh.w r3, [r4, #24]\n",
- " 5e: f934 3f18 ldrsh.w r3, [r4, #24]!\n",
- " 62: f934 3b18 ldrsh.w r3, [r4], #24\n",
- " 66: f934 3c18 ldrsh.w r3, [r4, #-24]\n",
- " 6a: f934 3d18 ldrsh.w r3, [r4, #-24]!\n",
- " 6e: f934 3918 ldrsh.w r3, [r4], #-24\n",
- nullptr
-};
-const char* const ComplexStoreResults[] = {
- " 0: 61a3 str r3, [r4, #24]\n",
- " 2: f844 3f18 str.w r3, [r4, #24]!\n",
- " 6: f844 3b18 str.w r3, [r4], #24\n",
- " a: f844 3c18 str.w r3, [r4, #-24]\n",
- " e: f844 3d18 str.w r3, [r4, #-24]!\n",
- " 12: f844 3918 str.w r3, [r4], #-24\n",
- " 16: 7623 strb r3, [r4, #24]\n",
- " 18: f804 3f18 strb.w r3, [r4, #24]!\n",
- " 1c: f804 3b18 strb.w r3, [r4], #24\n",
- " 20: f804 3c18 strb.w r3, [r4, #-24]\n",
- " 24: f804 3d18 strb.w r3, [r4, #-24]!\n",
- " 28: f804 3918 strb.w r3, [r4], #-24\n",
- " 2c: 8323 strh r3, [r4, #24]\n",
- " 2e: f824 3f18 strh.w r3, [r4, #24]!\n",
- " 32: f824 3b18 strh.w r3, [r4], #24\n",
- " 36: f824 3c18 strh.w r3, [r4, #-24]\n",
- " 3a: f824 3d18 strh.w r3, [r4, #-24]!\n",
- " 3e: f824 3918 strh.w r3, [r4], #-24\n",
- nullptr
-};
-const char* const NegativeLoadStoreResults[] = {
- " 0: f854 3c18 ldr.w r3, [r4, #-24]\n",
- " 4: f854 3d18 ldr.w r3, [r4, #-24]!\n",
- " 8: f854 3918 ldr.w r3, [r4], #-24\n",
- " c: f854 3e18 ldrt r3, [r4, #24]\n",
- " 10: f854 3f18 ldr.w r3, [r4, #24]!\n",
- " 14: f854 3b18 ldr.w r3, [r4], #24\n",
- " 18: f814 3c18 ldrb.w r3, [r4, #-24]\n",
- " 1c: f814 3d18 ldrb.w r3, [r4, #-24]!\n",
- " 20: f814 3918 ldrb.w r3, [r4], #-24\n",
- " 24: f814 3e18 ldrbt r3, [r4, #24]\n",
- " 28: f814 3f18 ldrb.w r3, [r4, #24]!\n",
- " 2c: f814 3b18 ldrb.w r3, [r4], #24\n",
- " 30: f834 3c18 ldrh.w r3, [r4, #-24]\n",
- " 34: f834 3d18 ldrh.w r3, [r4, #-24]!\n",
- " 38: f834 3918 ldrh.w r3, [r4], #-24\n",
- " 3c: f834 3e18 ldrht r3, [r4, #24]\n",
- " 40: f834 3f18 ldrh.w r3, [r4, #24]!\n",
- " 44: f834 3b18 ldrh.w r3, [r4], #24\n",
- " 48: f914 3c18 ldrsb.w r3, [r4, #-24]\n",
- " 4c: f914 3d18 ldrsb.w r3, [r4, #-24]!\n",
- " 50: f914 3918 ldrsb.w r3, [r4], #-24\n",
- " 54: f914 3e18 ldrsbt r3, [r4, #24]\n",
- " 58: f914 3f18 ldrsb.w r3, [r4, #24]!\n",
- " 5c: f914 3b18 ldrsb.w r3, [r4], #24\n",
- " 60: f934 3c18 ldrsh.w r3, [r4, #-24]\n",
- " 64: f934 3d18 ldrsh.w r3, [r4, #-24]!\n",
- " 68: f934 3918 ldrsh.w r3, [r4], #-24\n",
- " 6c: f934 3e18 ldrsht r3, [r4, #24]\n",
- " 70: f934 3f18 ldrsh.w r3, [r4, #24]!\n",
- " 74: f934 3b18 ldrsh.w r3, [r4], #24\n",
- " 78: f844 3c18 str.w r3, [r4, #-24]\n",
- " 7c: f844 3d18 str.w r3, [r4, #-24]!\n",
- " 80: f844 3918 str.w r3, [r4], #-24\n",
- " 84: f844 3e18 strt r3, [r4, #24]\n",
- " 88: f844 3f18 str.w r3, [r4, #24]!\n",
- " 8c: f844 3b18 str.w r3, [r4], #24\n",
- " 90: f804 3c18 strb.w r3, [r4, #-24]\n",
- " 94: f804 3d18 strb.w r3, [r4, #-24]!\n",
- " 98: f804 3918 strb.w r3, [r4], #-24\n",
- " 9c: f804 3e18 strbt r3, [r4, #24]\n",
- " a0: f804 3f18 strb.w r3, [r4, #24]!\n",
- " a4: f804 3b18 strb.w r3, [r4], #24\n",
- " a8: f824 3c18 strh.w r3, [r4, #-24]\n",
- " ac: f824 3d18 strh.w r3, [r4, #-24]!\n",
- " b0: f824 3918 strh.w r3, [r4], #-24\n",
- " b4: f824 3e18 strht r3, [r4, #24]\n",
- " b8: f824 3f18 strh.w r3, [r4, #24]!\n",
- " bc: f824 3b18 strh.w r3, [r4], #24\n",
- nullptr
-};
-const char* const SimpleLoadStoreDualResults[] = {
- " 0: e9c0 2306 strd r2, r3, [r0, #24]\n",
- " 4: e9d0 2306 ldrd r2, r3, [r0, #24]\n",
- nullptr
-};
-const char* const ComplexLoadStoreDualResults[] = {
- " 0: e9c0 2306 strd r2, r3, [r0, #24]\n",
- " 4: e9e0 2306 strd r2, r3, [r0, #24]!\n",
- " 8: e8e0 2306 strd r2, r3, [r0], #24\n",
- " c: e940 2306 strd r2, r3, [r0, #-24]\n",
- " 10: e960 2306 strd r2, r3, [r0, #-24]!\n",
- " 14: e860 2306 strd r2, r3, [r0], #-24\n",
- " 18: e9d0 2306 ldrd r2, r3, [r0, #24]\n",
- " 1c: e9f0 2306 ldrd r2, r3, [r0, #24]!\n",
- " 20: e8f0 2306 ldrd r2, r3, [r0], #24\n",
- " 24: e950 2306 ldrd r2, r3, [r0, #-24]\n",
- " 28: e970 2306 ldrd r2, r3, [r0, #-24]!\n",
- " 2c: e870 2306 ldrd r2, r3, [r0], #-24\n",
- nullptr
-};
-const char* const NegativeLoadStoreDualResults[] = {
- " 0: e940 2306 strd r2, r3, [r0, #-24]\n",
- " 4: e960 2306 strd r2, r3, [r0, #-24]!\n",
- " 8: e860 2306 strd r2, r3, [r0], #-24\n",
- " c: e9c0 2306 strd r2, r3, [r0, #24]\n",
- " 10: e9e0 2306 strd r2, r3, [r0, #24]!\n",
- " 14: e8e0 2306 strd r2, r3, [r0], #24\n",
- " 18: e950 2306 ldrd r2, r3, [r0, #-24]\n",
- " 1c: e970 2306 ldrd r2, r3, [r0, #-24]!\n",
- " 20: e870 2306 ldrd r2, r3, [r0], #-24\n",
- " 24: e9d0 2306 ldrd r2, r3, [r0, #24]\n",
- " 28: e9f0 2306 ldrd r2, r3, [r0, #24]!\n",
- " 2c: e8f0 2306 ldrd r2, r3, [r0], #24\n",
- nullptr
-};
-const char* const SimpleBranchResults[] = {
- " 0: 2002 movs r0, #2\n",
- " 2: 2101 movs r1, #1\n",
- " 4: e7fd b.n 2 <SimpleBranch+0x2>\n",
- " 6: e000 b.n a <SimpleBranch+0xa>\n",
- " 8: 2102 movs r1, #2\n",
- " a: 2003 movs r0, #3\n",
- " c: 2002 movs r0, #2\n",
- " e: 2101 movs r1, #1\n",
- " 10: d0fd beq.n e <SimpleBranch+0xe>\n",
- " 12: d000 beq.n 16 <SimpleBranch+0x16>\n",
- " 14: 2102 movs r1, #2\n",
- " 16: 2003 movs r0, #3\n",
- " 18: e002 b.n 20 <SimpleBranch+0x20>\n",
- " 1a: 2104 movs r1, #4\n",
- " 1c: e000 b.n 20 <SimpleBranch+0x20>\n",
- " 1e: 2105 movs r1, #5\n",
- " 20: 2006 movs r0, #6\n",
- nullptr
-};
-const char* const LongBranchResults[] = {
- " 0: f04f 0002 mov.w r0, #2\n",
- " 4: f04f 0101 mov.w r1, #1\n",
- " 8: f7ff bffc b.w 4 <LongBranch+0x4>\n",
- " c: f000 b802 b.w 14 <LongBranch+0x14>\n",
- " 10: f04f 0102 mov.w r1, #2\n",
- " 14: f04f 0003 mov.w r0, #3\n",
- " 18: f04f 0002 mov.w r0, #2\n",
- " 1c: f04f 0101 mov.w r1, #1\n",
- " 20: f43f affc beq.w 1c <LongBranch+0x1c>\n",
- " 24: f000 8002 beq.w 2c <LongBranch+0x2c>\n",
- " 28: f04f 0102 mov.w r1, #2\n",
- " 2c: f04f 0003 mov.w r0, #3\n",
- " 30: f000 b806 b.w 40 <LongBranch+0x40>\n",
- " 34: f04f 0104 mov.w r1, #4\n",
- " 38: f000 b802 b.w 40 <LongBranch+0x40>\n",
- " 3c: f04f 0105 mov.w r1, #5\n",
- " 40: f04f 0006 mov.w r0, #6\n",
- nullptr
-};
-const char* const LoadMultipleResults[] = {
- " 0: cc09 ldmia r4!, {r0, r3}\n",
- " 2: e934 4800 ldmdb r4!, {fp, lr}\n",
- " 6: e914 4800 ldmdb r4, {fp, lr}\n",
- " a: f854 5b04 ldr.w r5, [r4], #4\n",
- nullptr
-};
-const char* const StoreMultipleResults[] = {
- " 0: c409 stmia r4!, {r0, r3}\n",
- " 2: e8a4 4800 stmia.w r4!, {fp, lr}\n",
- " 6: e884 4800 stmia.w r4, {fp, lr}\n",
- " a: f844 5c04 str.w r5, [r4, #-4]\n",
- " e: f844 5d04 str.w r5, [r4, #-4]!\n",
- nullptr
-};
-const char* const MovWMovTResults[] = {
- " 0: f240 0400 movw r4, #0\n",
- " 4: f240 0434 movw r4, #52 ; 0x34\n",
- " 8: f240 0934 movw r9, #52 ; 0x34\n",
- " c: f241 2334 movw r3, #4660 ; 0x1234\n",
- " 10: f64f 79ff movw r9, #65535 ; 0xffff\n",
- " 14: f2c0 0000 movt r0, #0\n",
- " 18: f2c1 2034 movt r0, #4660 ; 0x1234\n",
- " 1c: f6cf 71ff movt r1, #65535 ; 0xffff\n",
- nullptr
-};
-const char* const SpecialAddSubResults[] = {
- " 0: aa14 add r2, sp, #80 ; 0x50\n",
- " 2: b014 add sp, #80 ; 0x50\n",
- " 4: f10d 0850 add.w r8, sp, #80 ; 0x50\n",
- " 8: f50d 6270 add.w r2, sp, #3840 ; 0xf00\n",
- " c: f50d 6d70 add.w sp, sp, #3840 ; 0xf00\n",
- " 10: f60d 7dfc addw sp, sp, #4092 ; 0xffc\n",
- " 14: b094 sub sp, #80 ; 0x50\n",
- " 16: f1ad 0050 sub.w r0, sp, #80 ; 0x50\n",
- " 1a: f1ad 0850 sub.w r8, sp, #80 ; 0x50\n",
- " 1e: f5ad 6d70 sub.w sp, sp, #3840 ; 0xf00\n",
- " 22: f6ad 7dfc subw sp, sp, #4092 ; 0xffc\n",
- nullptr
-};
-const char* const LoadFromOffsetResults[] = {
- " 0: 68e2 ldr r2, [r4, #12]\n",
- " 2: f8d4 2fff ldr.w r2, [r4, #4095] ; 0xfff\n",
- " 6: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
- " a: 6812 ldr r2, [r2, #0]\n",
- " c: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n",
- " 10: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n",
- " 14: f241 0200 movw r2, #4096 ; 0x1000\n",
- " 18: f2c0 0210 movt r2, #16\n",
- " 1c: 4422 add r2, r4\n",
- " 1e: 6812 ldr r2, [r2, #0]\n",
- " 20: f241 0c00 movw ip, #4096 ; 0x1000\n",
- " 24: f2c0 0c10 movt ip, #16\n",
- " 28: 4464 add r4, ip\n",
- " 2a: 6824 ldr r4, [r4, #0]\n",
- " 2c: 89a2 ldrh r2, [r4, #12]\n",
- " 2e: f8b4 2fff ldrh.w r2, [r4, #4095] ; 0xfff\n",
- " 32: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
- " 36: 8812 ldrh r2, [r2, #0]\n",
- " 38: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n",
- " 3c: f8b2 20a4 ldrh.w r2, [r2, #164] ; 0xa4\n",
- " 40: f241 0200 movw r2, #4096 ; 0x1000\n",
- " 44: f2c0 0210 movt r2, #16\n",
- " 48: 4422 add r2, r4\n",
- " 4a: 8812 ldrh r2, [r2, #0]\n",
- " 4c: f241 0c00 movw ip, #4096 ; 0x1000\n",
- " 50: f2c0 0c10 movt ip, #16\n",
- " 54: 4464 add r4, ip\n",
- " 56: 8824 ldrh r4, [r4, #0]\n",
- " 58: e9d4 2303 ldrd r2, r3, [r4, #12]\n",
- " 5c: e9d4 23ff ldrd r2, r3, [r4, #1020] ; 0x3fc\n",
- " 60: f504 6280 add.w r2, r4, #1024 ; 0x400\n",
- " 64: e9d2 2300 ldrd r2, r3, [r2]\n",
- " 68: f504 2280 add.w r2, r4, #262144 ; 0x40000\n",
- " 6c: e9d2 2329 ldrd r2, r3, [r2, #164]; 0xa4\n",
- " 70: f240 4200 movw r2, #1024 ; 0x400\n",
- " 74: f2c0 0204 movt r2, #4\n",
- " 78: 4422 add r2, r4\n",
- " 7a: e9d2 2300 ldrd r2, r3, [r2]\n",
- " 7e: f240 4c00 movw ip, #1024 ; 0x400\n",
- " 82: f2c0 0c04 movt ip, #4\n",
- " 86: 4464 add r4, ip\n",
- " 88: e9d4 4500 ldrd r4, r5, [r4]\n",
- " 8c: f8dc 000c ldr.w r0, [ip, #12]\n",
- " 90: f5a4 1280 sub.w r2, r4, #1048576 ; 0x100000\n",
- " 94: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n",
- " 98: f994 200c ldrsb.w r2, [r4, #12]\n",
- " 9c: 7b22 ldrb r2, [r4, #12]\n",
- " 9e: f9b4 200c ldrsh.w r2, [r4, #12]\n",
- nullptr
-};
-const char* const StoreToOffsetResults[] = {
- " 0: 60e2 str r2, [r4, #12]\n",
- " 2: f8c4 2fff str.w r2, [r4, #4095] ; 0xfff\n",
- " 6: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
- " a: f8cc 2000 str.w r2, [ip]\n",
- " e: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n",
- " 12: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n",
- " 16: f241 0c00 movw ip, #4096 ; 0x1000\n",
- " 1a: f2c0 0c10 movt ip, #16\n",
- " 1e: 44a4 add ip, r4\n",
- " 20: f8cc 2000 str.w r2, [ip]\n",
- " 24: f241 0c00 movw ip, #4096 ; 0x1000\n",
- " 28: f2c0 0c10 movt ip, #16\n",
- " 2c: 44a4 add ip, r4\n",
- " 2e: f8cc 4000 str.w r4, [ip]\n",
- " 32: 81a2 strh r2, [r4, #12]\n",
- " 34: f8a4 2fff strh.w r2, [r4, #4095] ; 0xfff\n",
- " 38: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
- " 3c: f8ac 2000 strh.w r2, [ip]\n",
- " 40: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n",
- " 44: f8ac 20a4 strh.w r2, [ip, #164] ; 0xa4\n",
- " 48: f241 0c00 movw ip, #4096 ; 0x1000\n",
- " 4c: f2c0 0c10 movt ip, #16\n",
- " 50: 44a4 add ip, r4\n",
- " 52: f8ac 2000 strh.w r2, [ip]\n",
- " 56: f241 0c00 movw ip, #4096 ; 0x1000\n",
- " 5a: f2c0 0c10 movt ip, #16\n",
- " 5e: 44a4 add ip, r4\n",
- " 60: f8ac 4000 strh.w r4, [ip]\n",
- " 64: e9c4 2303 strd r2, r3, [r4, #12]\n",
- " 68: e9c4 23ff strd r2, r3, [r4, #1020] ; 0x3fc\n",
- " 6c: f504 6c80 add.w ip, r4, #1024 ; 0x400\n",
- " 70: e9cc 2300 strd r2, r3, [ip]\n",
- " 74: f504 2c80 add.w ip, r4, #262144 ; 0x40000\n",
- " 78: e9cc 2329 strd r2, r3, [ip, #164]; 0xa4\n",
- " 7c: f240 4c00 movw ip, #1024 ; 0x400\n",
- " 80: f2c0 0c04 movt ip, #4\n",
- " 84: 44a4 add ip, r4\n",
- " 86: e9cc 2300 strd r2, r3, [ip]\n",
- " 8a: f240 4c00 movw ip, #1024 ; 0x400\n",
- " 8e: f2c0 0c04 movt ip, #4\n",
- " 92: 44a4 add ip, r4\n",
- " 94: e9cc 4500 strd r4, r5, [ip]\n",
- " 98: f8cc 000c str.w r0, [ip, #12]\n",
- " 9c: f5a4 1c80 sub.w ip, r4, #1048576 ; 0x100000\n",
- " a0: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n",
- " a4: 7322 strb r2, [r4, #12]\n",
- nullptr
-};
-const char* const IfThenResults[] = {
- " 0: bf08 it eq\n",
- " 2: 2101 moveq r1, #1\n",
- " 4: bf04 itt eq\n",
- " 6: 2101 moveq r1, #1\n",
- " 8: 2202 moveq r2, #2\n",
- " a: bf0c ite eq\n",
- " c: 2101 moveq r1, #1\n",
- " e: 2202 movne r2, #2\n",
- " 10: bf06 itte eq\n",
- " 12: 2101 moveq r1, #1\n",
- " 14: 2202 moveq r2, #2\n",
- " 16: 2303 movne r3, #3\n",
- " 18: bf0e itee eq\n",
- " 1a: 2101 moveq r1, #1\n",
- " 1c: 2202 movne r2, #2\n",
- " 1e: 2303 movne r3, #3\n",
- " 20: bf03 ittte eq\n",
- " 22: 2101 moveq r1, #1\n",
- " 24: 2202 moveq r2, #2\n",
- " 26: 2303 moveq r3, #3\n",
- " 28: 2404 movne r4, #4\n",
- nullptr
-};
-const char* const CbzCbnzResults[] = {
- " 0: b10a cbz r2, 6 <CbzCbnz+0x6>\n",
- " 2: 2103 movs r1, #3\n",
- " 4: 2203 movs r2, #3\n",
- " 6: 2204 movs r2, #4\n",
- " 8: b912 cbnz r2, 10 <CbzCbnz+0x10>\n",
- " a: f04f 0803 mov.w r8, #3\n",
- " e: 2203 movs r2, #3\n",
- " 10: 2204 movs r2, #4\n",
- nullptr
-};
-const char* const MultiplyResults[] = {
- " 0: 4348 muls r0, r1\n",
- " 2: fb01 f002 mul.w r0, r1, r2\n",
- " 6: fb09 f808 mul.w r8, r9, r8\n",
- " a: fb09 f80a mul.w r8, r9, sl\n",
- " e: fb01 3002 mla r0, r1, r2, r3\n",
- " 12: fb09 9808 mla r8, r9, r8, r9\n",
- " 16: fb01 3012 mls r0, r1, r2, r3\n",
- " 1a: fb09 9818 mls r8, r9, r8, r9\n",
- " 1e: fba2 0103 umull r0, r1, r2, r3\n",
- " 22: fbaa 890b umull r8, r9, sl, fp\n",
- nullptr
-};
-const char* const DivideResults[] = {
- " 0: fb91 f0f2 sdiv r0, r1, r2\n",
- " 4: fb99 f8fa sdiv r8, r9, sl\n",
- " 8: fbb1 f0f2 udiv r0, r1, r2\n",
- " c: fbb9 f8fa udiv r8, r9, sl\n",
- nullptr
-};
-const char* const VMovResults[] = {
- " 0: eef7 0a00 vmov.f32 s1, #112 ; 0x70\n",
- " 4: eeb7 1b00 vmov.f64 d1, #112 ; 0x70\n",
- " 8: eef0 0a41 vmov.f32 s1, s2\n",
- " c: eeb0 1b42 vmov.f64 d1, d2\n",
- nullptr
-};
-const char* const BasicFloatingPointResults[] = {
- " 0: ee30 0a81 vadd.f32 s0, s1, s2\n",
- " 4: ee30 0ac1 vsub.f32 s0, s1, s2\n",
- " 8: ee20 0a81 vmul.f32 s0, s1, s2\n",
- " c: ee00 0a81 vmla.f32 s0, s1, s2\n",
- " 10: ee00 0ac1 vmls.f32 s0, s1, s2\n",
- " 14: ee80 0a81 vdiv.f32 s0, s1, s2\n",
- " 18: eeb0 0ae0 vabs.f32 s0, s1\n",
- " 1c: eeb1 0a60 vneg.f32 s0, s1\n",
- " 20: eeb1 0ae0 vsqrt.f32 s0, s1\n",
- " 24: ee31 0b02 vadd.f64 d0, d1, d2\n",
- " 28: ee31 0b42 vsub.f64 d0, d1, d2\n",
- " 2c: ee21 0b02 vmul.f64 d0, d1, d2\n",
- " 30: ee01 0b02 vmla.f64 d0, d1, d2\n",
- " 34: ee01 0b42 vmls.f64 d0, d1, d2\n",
- " 38: ee81 0b02 vdiv.f64 d0, d1, d2\n",
- " 3c: eeb0 0bc1 vabs.f64 d0, d1\n",
- " 40: eeb1 0b41 vneg.f64 d0, d1\n",
- " 44: eeb1 0bc1 vsqrt.f64 d0, d1\n",
- nullptr
-};
-const char* const FloatingPointConversionsResults[] = {
- " 0: eeb7 1bc2 vcvt.f32.f64 s2, d2\n",
- " 4: eeb7 2ac1 vcvt.f64.f32 d2, s2\n",
- " 8: eefd 0ac1 vcvt.s32.f32 s1, s2\n",
- " c: eef8 0ac1 vcvt.f32.s32 s1, s2\n",
- " 10: eefd 0bc2 vcvt.s32.f64 s1, d2\n",
- " 14: eeb8 1bc1 vcvt.f64.s32 d1, s2\n",
- " 18: eefc 0ac1 vcvt.u32.f32 s1, s2\n",
- " 1c: eef8 0a41 vcvt.f32.u32 s1, s2\n",
- " 20: eefc 0bc2 vcvt.u32.f64 s1, d2\n",
- " 24: eeb8 1b41 vcvt.f64.u32 d1, s2\n",
- nullptr
-};
-const char* const FloatingPointComparisonsResults[] = {
- " 0: eeb4 0a60 vcmp.f32 s0, s1\n",
- " 4: eeb4 0b41 vcmp.f64 d0, d1\n",
- " 8: eeb5 1a40 vcmp.f32 s2, #0.0\n",
- " c: eeb5 2b40 vcmp.f64 d2, #0.0\n",
- nullptr
-};
-const char* const CallsResults[] = {
- " 0: 47f0 blx lr\n",
- " 2: 4770 bx lr\n",
- nullptr
-};
-const char* const BreakpointResults[] = {
- " 0: be00 bkpt 0x0000\n",
- nullptr
-};
-const char* const StrR1Results[] = {
- " 0: 9111 str r1, [sp, #68] ; 0x44\n",
- " 2: f8cd 142c str.w r1, [sp, #1068] ; 0x42c\n",
- nullptr
-};
-const char* const VPushPopResults[] = {
- " 0: ed2d 1a04 vpush {s2-s5}\n",
- " 4: ed2d 2b08 vpush {d2-d5}\n",
- " 8: ecbd 1a04 vpop {s2-s5}\n",
- " c: ecbd 2b08 vpop {d2-d5}\n",
- nullptr
-};
-const char* const Max16BitBranchResults[] = {
- " 0: e3ff b.n 802 <Max16BitBranch+0x802>\n",
- " 2: 2300 movs r3, #0\n",
- " 4: 2302 movs r3, #2\n",
- " 6: 2304 movs r3, #4\n",
- " 8: 2306 movs r3, #6\n",
- " a: 2308 movs r3, #8\n",
- " c: 230a movs r3, #10\n",
- " e: 230c movs r3, #12\n",
- " 10: 230e movs r3, #14\n",
- " 12: 2310 movs r3, #16\n",
- " 14: 2312 movs r3, #18\n",
- " 16: 2314 movs r3, #20\n",
- " 18: 2316 movs r3, #22\n",
- " 1a: 2318 movs r3, #24\n",
- " 1c: 231a movs r3, #26\n",
- " 1e: 231c movs r3, #28\n",
- " 20: 231e movs r3, #30\n",
- " 22: 2320 movs r3, #32\n",
- " 24: 2322 movs r3, #34 ; 0x22\n",
- " 26: 2324 movs r3, #36 ; 0x24\n",
- " 28: 2326 movs r3, #38 ; 0x26\n",
- " 2a: 2328 movs r3, #40 ; 0x28\n",
- " 2c: 232a movs r3, #42 ; 0x2a\n",
- " 2e: 232c movs r3, #44 ; 0x2c\n",
- " 30: 232e movs r3, #46 ; 0x2e\n",
- " 32: 2330 movs r3, #48 ; 0x30\n",
- " 34: 2332 movs r3, #50 ; 0x32\n",
- " 36: 2334 movs r3, #52 ; 0x34\n",
- " 38: 2336 movs r3, #54 ; 0x36\n",
- " 3a: 2338 movs r3, #56 ; 0x38\n",
- " 3c: 233a movs r3, #58 ; 0x3a\n",
- " 3e: 233c movs r3, #60 ; 0x3c\n",
- " 40: 233e movs r3, #62 ; 0x3e\n",
- " 42: 2340 movs r3, #64 ; 0x40\n",
- " 44: 2342 movs r3, #66 ; 0x42\n",
- " 46: 2344 movs r3, #68 ; 0x44\n",
- " 48: 2346 movs r3, #70 ; 0x46\n",
- " 4a: 2348 movs r3, #72 ; 0x48\n",
- " 4c: 234a movs r3, #74 ; 0x4a\n",
- " 4e: 234c movs r3, #76 ; 0x4c\n",
- " 50: 234e movs r3, #78 ; 0x4e\n",
- " 52: 2350 movs r3, #80 ; 0x50\n",
- " 54: 2352 movs r3, #82 ; 0x52\n",
- " 56: 2354 movs r3, #84 ; 0x54\n",
- " 58: 2356 movs r3, #86 ; 0x56\n",
- " 5a: 2358 movs r3, #88 ; 0x58\n",
- " 5c: 235a movs r3, #90 ; 0x5a\n",
- " 5e: 235c movs r3, #92 ; 0x5c\n",
- " 60: 235e movs r3, #94 ; 0x5e\n",
- " 62: 2360 movs r3, #96 ; 0x60\n",
- " 64: 2362 movs r3, #98 ; 0x62\n",
- " 66: 2364 movs r3, #100 ; 0x64\n",
- " 68: 2366 movs r3, #102 ; 0x66\n",
- " 6a: 2368 movs r3, #104 ; 0x68\n",
- " 6c: 236a movs r3, #106 ; 0x6a\n",
- " 6e: 236c movs r3, #108 ; 0x6c\n",
- " 70: 236e movs r3, #110 ; 0x6e\n",
- " 72: 2370 movs r3, #112 ; 0x70\n",
- " 74: 2372 movs r3, #114 ; 0x72\n",
- " 76: 2374 movs r3, #116 ; 0x74\n",
- " 78: 2376 movs r3, #118 ; 0x76\n",
- " 7a: 2378 movs r3, #120 ; 0x78\n",
- " 7c: 237a movs r3, #122 ; 0x7a\n",
- " 7e: 237c movs r3, #124 ; 0x7c\n",
- " 80: 237e movs r3, #126 ; 0x7e\n",
- " 82: 2380 movs r3, #128 ; 0x80\n",
- " 84: 2382 movs r3, #130 ; 0x82\n",
- " 86: 2384 movs r3, #132 ; 0x84\n",
- " 88: 2386 movs r3, #134 ; 0x86\n",
- " 8a: 2388 movs r3, #136 ; 0x88\n",
- " 8c: 238a movs r3, #138 ; 0x8a\n",
- " 8e: 238c movs r3, #140 ; 0x8c\n",
- " 90: 238e movs r3, #142 ; 0x8e\n",
- " 92: 2390 movs r3, #144 ; 0x90\n",
- " 94: 2392 movs r3, #146 ; 0x92\n",
- " 96: 2394 movs r3, #148 ; 0x94\n",
- " 98: 2396 movs r3, #150 ; 0x96\n",
- " 9a: 2398 movs r3, #152 ; 0x98\n",
- " 9c: 239a movs r3, #154 ; 0x9a\n",
- " 9e: 239c movs r3, #156 ; 0x9c\n",
- " a0: 239e movs r3, #158 ; 0x9e\n",
- " a2: 23a0 movs r3, #160 ; 0xa0\n",
- " a4: 23a2 movs r3, #162 ; 0xa2\n",
- " a6: 23a4 movs r3, #164 ; 0xa4\n",
- " a8: 23a6 movs r3, #166 ; 0xa6\n",
- " aa: 23a8 movs r3, #168 ; 0xa8\n",
- " ac: 23aa movs r3, #170 ; 0xaa\n",
- " ae: 23ac movs r3, #172 ; 0xac\n",
- " b0: 23ae movs r3, #174 ; 0xae\n",
- " b2: 23b0 movs r3, #176 ; 0xb0\n",
- " b4: 23b2 movs r3, #178 ; 0xb2\n",
- " b6: 23b4 movs r3, #180 ; 0xb4\n",
- " b8: 23b6 movs r3, #182 ; 0xb6\n",
- " ba: 23b8 movs r3, #184 ; 0xb8\n",
- " bc: 23ba movs r3, #186 ; 0xba\n",
- " be: 23bc movs r3, #188 ; 0xbc\n",
- " c0: 23be movs r3, #190 ; 0xbe\n",
- " c2: 23c0 movs r3, #192 ; 0xc0\n",
- " c4: 23c2 movs r3, #194 ; 0xc2\n",
- " c6: 23c4 movs r3, #196 ; 0xc4\n",
- " c8: 23c6 movs r3, #198 ; 0xc6\n",
- " ca: 23c8 movs r3, #200 ; 0xc8\n",
- " cc: 23ca movs r3, #202 ; 0xca\n",
- " ce: 23cc movs r3, #204 ; 0xcc\n",
- " d0: 23ce movs r3, #206 ; 0xce\n",
- " d2: 23d0 movs r3, #208 ; 0xd0\n",
- " d4: 23d2 movs r3, #210 ; 0xd2\n",
- " d6: 23d4 movs r3, #212 ; 0xd4\n",
- " d8: 23d6 movs r3, #214 ; 0xd6\n",
- " da: 23d8 movs r3, #216 ; 0xd8\n",
- " dc: 23da movs r3, #218 ; 0xda\n",
- " de: 23dc movs r3, #220 ; 0xdc\n",
- " e0: 23de movs r3, #222 ; 0xde\n",
- " e2: 23e0 movs r3, #224 ; 0xe0\n",
- " e4: 23e2 movs r3, #226 ; 0xe2\n",
- " e6: 23e4 movs r3, #228 ; 0xe4\n",
- " e8: 23e6 movs r3, #230 ; 0xe6\n",
- " ea: 23e8 movs r3, #232 ; 0xe8\n",
- " ec: 23ea movs r3, #234 ; 0xea\n",
- " ee: 23ec movs r3, #236 ; 0xec\n",
- " f0: 23ee movs r3, #238 ; 0xee\n",
- " f2: 23f0 movs r3, #240 ; 0xf0\n",
- " f4: 23f2 movs r3, #242 ; 0xf2\n",
- " f6: 23f4 movs r3, #244 ; 0xf4\n",
- " f8: 23f6 movs r3, #246 ; 0xf6\n",
- " fa: 23f8 movs r3, #248 ; 0xf8\n",
- " fc: 23fa movs r3, #250 ; 0xfa\n",
- " fe: 23fc movs r3, #252 ; 0xfc\n",
- " 100: 23fe movs r3, #254 ; 0xfe\n",
- " 102: 2300 movs r3, #0\n",
- " 104: 2302 movs r3, #2\n",
- " 106: 2304 movs r3, #4\n",
- " 108: 2306 movs r3, #6\n",
- " 10a: 2308 movs r3, #8\n",
- " 10c: 230a movs r3, #10\n",
- " 10e: 230c movs r3, #12\n",
- " 110: 230e movs r3, #14\n",
- " 112: 2310 movs r3, #16\n",
- " 114: 2312 movs r3, #18\n",
- " 116: 2314 movs r3, #20\n",
- " 118: 2316 movs r3, #22\n",
- " 11a: 2318 movs r3, #24\n",
- " 11c: 231a movs r3, #26\n",
- " 11e: 231c movs r3, #28\n",
- " 120: 231e movs r3, #30\n",
- " 122: 2320 movs r3, #32\n",
- " 124: 2322 movs r3, #34 ; 0x22\n",
- " 126: 2324 movs r3, #36 ; 0x24\n",
- " 128: 2326 movs r3, #38 ; 0x26\n",
- " 12a: 2328 movs r3, #40 ; 0x28\n",
- " 12c: 232a movs r3, #42 ; 0x2a\n",
- " 12e: 232c movs r3, #44 ; 0x2c\n",
- " 130: 232e movs r3, #46 ; 0x2e\n",
- " 132: 2330 movs r3, #48 ; 0x30\n",
- " 134: 2332 movs r3, #50 ; 0x32\n",
- " 136: 2334 movs r3, #52 ; 0x34\n",
- " 138: 2336 movs r3, #54 ; 0x36\n",
- " 13a: 2338 movs r3, #56 ; 0x38\n",
- " 13c: 233a movs r3, #58 ; 0x3a\n",
- " 13e: 233c movs r3, #60 ; 0x3c\n",
- " 140: 233e movs r3, #62 ; 0x3e\n",
- " 142: 2340 movs r3, #64 ; 0x40\n",
- " 144: 2342 movs r3, #66 ; 0x42\n",
- " 146: 2344 movs r3, #68 ; 0x44\n",
- " 148: 2346 movs r3, #70 ; 0x46\n",
- " 14a: 2348 movs r3, #72 ; 0x48\n",
- " 14c: 234a movs r3, #74 ; 0x4a\n",
- " 14e: 234c movs r3, #76 ; 0x4c\n",
- " 150: 234e movs r3, #78 ; 0x4e\n",
- " 152: 2350 movs r3, #80 ; 0x50\n",
- " 154: 2352 movs r3, #82 ; 0x52\n",
- " 156: 2354 movs r3, #84 ; 0x54\n",
- " 158: 2356 movs r3, #86 ; 0x56\n",
- " 15a: 2358 movs r3, #88 ; 0x58\n",
- " 15c: 235a movs r3, #90 ; 0x5a\n",
- " 15e: 235c movs r3, #92 ; 0x5c\n",
- " 160: 235e movs r3, #94 ; 0x5e\n",
- " 162: 2360 movs r3, #96 ; 0x60\n",
- " 164: 2362 movs r3, #98 ; 0x62\n",
- " 166: 2364 movs r3, #100 ; 0x64\n",
- " 168: 2366 movs r3, #102 ; 0x66\n",
- " 16a: 2368 movs r3, #104 ; 0x68\n",
- " 16c: 236a movs r3, #106 ; 0x6a\n",
- " 16e: 236c movs r3, #108 ; 0x6c\n",
- " 170: 236e movs r3, #110 ; 0x6e\n",
- " 172: 2370 movs r3, #112 ; 0x70\n",
- " 174: 2372 movs r3, #114 ; 0x72\n",
- " 176: 2374 movs r3, #116 ; 0x74\n",
- " 178: 2376 movs r3, #118 ; 0x76\n",
- " 17a: 2378 movs r3, #120 ; 0x78\n",
- " 17c: 237a movs r3, #122 ; 0x7a\n",
- " 17e: 237c movs r3, #124 ; 0x7c\n",
- " 180: 237e movs r3, #126 ; 0x7e\n",
- " 182: 2380 movs r3, #128 ; 0x80\n",
- " 184: 2382 movs r3, #130 ; 0x82\n",
- " 186: 2384 movs r3, #132 ; 0x84\n",
- " 188: 2386 movs r3, #134 ; 0x86\n",
- " 18a: 2388 movs r3, #136 ; 0x88\n",
- " 18c: 238a movs r3, #138 ; 0x8a\n",
- " 18e: 238c movs r3, #140 ; 0x8c\n",
- " 190: 238e movs r3, #142 ; 0x8e\n",
- " 192: 2390 movs r3, #144 ; 0x90\n",
- " 194: 2392 movs r3, #146 ; 0x92\n",
- " 196: 2394 movs r3, #148 ; 0x94\n",
- " 198: 2396 movs r3, #150 ; 0x96\n",
- " 19a: 2398 movs r3, #152 ; 0x98\n",
- " 19c: 239a movs r3, #154 ; 0x9a\n",
- " 19e: 239c movs r3, #156 ; 0x9c\n",
- " 1a0: 239e movs r3, #158 ; 0x9e\n",
- " 1a2: 23a0 movs r3, #160 ; 0xa0\n",
- " 1a4: 23a2 movs r3, #162 ; 0xa2\n",
- " 1a6: 23a4 movs r3, #164 ; 0xa4\n",
- " 1a8: 23a6 movs r3, #166 ; 0xa6\n",
- " 1aa: 23a8 movs r3, #168 ; 0xa8\n",
- " 1ac: 23aa movs r3, #170 ; 0xaa\n",
- " 1ae: 23ac movs r3, #172 ; 0xac\n",
- " 1b0: 23ae movs r3, #174 ; 0xae\n",
- " 1b2: 23b0 movs r3, #176 ; 0xb0\n",
- " 1b4: 23b2 movs r3, #178 ; 0xb2\n",
- " 1b6: 23b4 movs r3, #180 ; 0xb4\n",
- " 1b8: 23b6 movs r3, #182 ; 0xb6\n",
- " 1ba: 23b8 movs r3, #184 ; 0xb8\n",
- " 1bc: 23ba movs r3, #186 ; 0xba\n",
- " 1be: 23bc movs r3, #188 ; 0xbc\n",
- " 1c0: 23be movs r3, #190 ; 0xbe\n",
- " 1c2: 23c0 movs r3, #192 ; 0xc0\n",
- " 1c4: 23c2 movs r3, #194 ; 0xc2\n",
- " 1c6: 23c4 movs r3, #196 ; 0xc4\n",
- " 1c8: 23c6 movs r3, #198 ; 0xc6\n",
- " 1ca: 23c8 movs r3, #200 ; 0xc8\n",
- " 1cc: 23ca movs r3, #202 ; 0xca\n",
- " 1ce: 23cc movs r3, #204 ; 0xcc\n",
- " 1d0: 23ce movs r3, #206 ; 0xce\n",
- " 1d2: 23d0 movs r3, #208 ; 0xd0\n",
- " 1d4: 23d2 movs r3, #210 ; 0xd2\n",
- " 1d6: 23d4 movs r3, #212 ; 0xd4\n",
- " 1d8: 23d6 movs r3, #214 ; 0xd6\n",
- " 1da: 23d8 movs r3, #216 ; 0xd8\n",
- " 1dc: 23da movs r3, #218 ; 0xda\n",
- " 1de: 23dc movs r3, #220 ; 0xdc\n",
- " 1e0: 23de movs r3, #222 ; 0xde\n",
- " 1e2: 23e0 movs r3, #224 ; 0xe0\n",
- " 1e4: 23e2 movs r3, #226 ; 0xe2\n",
- " 1e6: 23e4 movs r3, #228 ; 0xe4\n",
- " 1e8: 23e6 movs r3, #230 ; 0xe6\n",
- " 1ea: 23e8 movs r3, #232 ; 0xe8\n",
- " 1ec: 23ea movs r3, #234 ; 0xea\n",
- " 1ee: 23ec movs r3, #236 ; 0xec\n",
- " 1f0: 23ee movs r3, #238 ; 0xee\n",
- " 1f2: 23f0 movs r3, #240 ; 0xf0\n",
- " 1f4: 23f2 movs r3, #242 ; 0xf2\n",
- " 1f6: 23f4 movs r3, #244 ; 0xf4\n",
- " 1f8: 23f6 movs r3, #246 ; 0xf6\n",
- " 1fa: 23f8 movs r3, #248 ; 0xf8\n",
- " 1fc: 23fa movs r3, #250 ; 0xfa\n",
- " 1fe: 23fc movs r3, #252 ; 0xfc\n",
- " 200: 23fe movs r3, #254 ; 0xfe\n",
- " 202: 2300 movs r3, #0\n",
- " 204: 2302 movs r3, #2\n",
- " 206: 2304 movs r3, #4\n",
- " 208: 2306 movs r3, #6\n",
- " 20a: 2308 movs r3, #8\n",
- " 20c: 230a movs r3, #10\n",
- " 20e: 230c movs r3, #12\n",
- " 210: 230e movs r3, #14\n",
- " 212: 2310 movs r3, #16\n",
- " 214: 2312 movs r3, #18\n",
- " 216: 2314 movs r3, #20\n",
- " 218: 2316 movs r3, #22\n",
- " 21a: 2318 movs r3, #24\n",
- " 21c: 231a movs r3, #26\n",
- " 21e: 231c movs r3, #28\n",
- " 220: 231e movs r3, #30\n",
- " 222: 2320 movs r3, #32\n",
- " 224: 2322 movs r3, #34 ; 0x22\n",
- " 226: 2324 movs r3, #36 ; 0x24\n",
- " 228: 2326 movs r3, #38 ; 0x26\n",
- " 22a: 2328 movs r3, #40 ; 0x28\n",
- " 22c: 232a movs r3, #42 ; 0x2a\n",
- " 22e: 232c movs r3, #44 ; 0x2c\n",
- " 230: 232e movs r3, #46 ; 0x2e\n",
- " 232: 2330 movs r3, #48 ; 0x30\n",
- " 234: 2332 movs r3, #50 ; 0x32\n",
- " 236: 2334 movs r3, #52 ; 0x34\n",
- " 238: 2336 movs r3, #54 ; 0x36\n",
- " 23a: 2338 movs r3, #56 ; 0x38\n",
- " 23c: 233a movs r3, #58 ; 0x3a\n",
- " 23e: 233c movs r3, #60 ; 0x3c\n",
- " 240: 233e movs r3, #62 ; 0x3e\n",
- " 242: 2340 movs r3, #64 ; 0x40\n",
- " 244: 2342 movs r3, #66 ; 0x42\n",
- " 246: 2344 movs r3, #68 ; 0x44\n",
- " 248: 2346 movs r3, #70 ; 0x46\n",
- " 24a: 2348 movs r3, #72 ; 0x48\n",
- " 24c: 234a movs r3, #74 ; 0x4a\n",
- " 24e: 234c movs r3, #76 ; 0x4c\n",
- " 250: 234e movs r3, #78 ; 0x4e\n",
- " 252: 2350 movs r3, #80 ; 0x50\n",
- " 254: 2352 movs r3, #82 ; 0x52\n",
- " 256: 2354 movs r3, #84 ; 0x54\n",
- " 258: 2356 movs r3, #86 ; 0x56\n",
- " 25a: 2358 movs r3, #88 ; 0x58\n",
- " 25c: 235a movs r3, #90 ; 0x5a\n",
- " 25e: 235c movs r3, #92 ; 0x5c\n",
- " 260: 235e movs r3, #94 ; 0x5e\n",
- " 262: 2360 movs r3, #96 ; 0x60\n",
- " 264: 2362 movs r3, #98 ; 0x62\n",
- " 266: 2364 movs r3, #100 ; 0x64\n",
- " 268: 2366 movs r3, #102 ; 0x66\n",
- " 26a: 2368 movs r3, #104 ; 0x68\n",
- " 26c: 236a movs r3, #106 ; 0x6a\n",
- " 26e: 236c movs r3, #108 ; 0x6c\n",
- " 270: 236e movs r3, #110 ; 0x6e\n",
- " 272: 2370 movs r3, #112 ; 0x70\n",
- " 274: 2372 movs r3, #114 ; 0x72\n",
- " 276: 2374 movs r3, #116 ; 0x74\n",
- " 278: 2376 movs r3, #118 ; 0x76\n",
- " 27a: 2378 movs r3, #120 ; 0x78\n",
- " 27c: 237a movs r3, #122 ; 0x7a\n",
- " 27e: 237c movs r3, #124 ; 0x7c\n",
- " 280: 237e movs r3, #126 ; 0x7e\n",
- " 282: 2380 movs r3, #128 ; 0x80\n",
- " 284: 2382 movs r3, #130 ; 0x82\n",
- " 286: 2384 movs r3, #132 ; 0x84\n",
- " 288: 2386 movs r3, #134 ; 0x86\n",
- " 28a: 2388 movs r3, #136 ; 0x88\n",
- " 28c: 238a movs r3, #138 ; 0x8a\n",
- " 28e: 238c movs r3, #140 ; 0x8c\n",
- " 290: 238e movs r3, #142 ; 0x8e\n",
- " 292: 2390 movs r3, #144 ; 0x90\n",
- " 294: 2392 movs r3, #146 ; 0x92\n",
- " 296: 2394 movs r3, #148 ; 0x94\n",
- " 298: 2396 movs r3, #150 ; 0x96\n",
- " 29a: 2398 movs r3, #152 ; 0x98\n",
- " 29c: 239a movs r3, #154 ; 0x9a\n",
- " 29e: 239c movs r3, #156 ; 0x9c\n",
- " 2a0: 239e movs r3, #158 ; 0x9e\n",
- " 2a2: 23a0 movs r3, #160 ; 0xa0\n",
- " 2a4: 23a2 movs r3, #162 ; 0xa2\n",
- " 2a6: 23a4 movs r3, #164 ; 0xa4\n",
- " 2a8: 23a6 movs r3, #166 ; 0xa6\n",
- " 2aa: 23a8 movs r3, #168 ; 0xa8\n",
- " 2ac: 23aa movs r3, #170 ; 0xaa\n",
- " 2ae: 23ac movs r3, #172 ; 0xac\n",
- " 2b0: 23ae movs r3, #174 ; 0xae\n",
- " 2b2: 23b0 movs r3, #176 ; 0xb0\n",
- " 2b4: 23b2 movs r3, #178 ; 0xb2\n",
- " 2b6: 23b4 movs r3, #180 ; 0xb4\n",
- " 2b8: 23b6 movs r3, #182 ; 0xb6\n",
- " 2ba: 23b8 movs r3, #184 ; 0xb8\n",
- " 2bc: 23ba movs r3, #186 ; 0xba\n",
- " 2be: 23bc movs r3, #188 ; 0xbc\n",
- " 2c0: 23be movs r3, #190 ; 0xbe\n",
- " 2c2: 23c0 movs r3, #192 ; 0xc0\n",
- " 2c4: 23c2 movs r3, #194 ; 0xc2\n",
- " 2c6: 23c4 movs r3, #196 ; 0xc4\n",
- " 2c8: 23c6 movs r3, #198 ; 0xc6\n",
- " 2ca: 23c8 movs r3, #200 ; 0xc8\n",
- " 2cc: 23ca movs r3, #202 ; 0xca\n",
- " 2ce: 23cc movs r3, #204 ; 0xcc\n",
- " 2d0: 23ce movs r3, #206 ; 0xce\n",
- " 2d2: 23d0 movs r3, #208 ; 0xd0\n",
- " 2d4: 23d2 movs r3, #210 ; 0xd2\n",
- " 2d6: 23d4 movs r3, #212 ; 0xd4\n",
- " 2d8: 23d6 movs r3, #214 ; 0xd6\n",
- " 2da: 23d8 movs r3, #216 ; 0xd8\n",
- " 2dc: 23da movs r3, #218 ; 0xda\n",
- " 2de: 23dc movs r3, #220 ; 0xdc\n",
- " 2e0: 23de movs r3, #222 ; 0xde\n",
- " 2e2: 23e0 movs r3, #224 ; 0xe0\n",
- " 2e4: 23e2 movs r3, #226 ; 0xe2\n",
- " 2e6: 23e4 movs r3, #228 ; 0xe4\n",
- " 2e8: 23e6 movs r3, #230 ; 0xe6\n",
- " 2ea: 23e8 movs r3, #232 ; 0xe8\n",
- " 2ec: 23ea movs r3, #234 ; 0xea\n",
- " 2ee: 23ec movs r3, #236 ; 0xec\n",
- " 2f0: 23ee movs r3, #238 ; 0xee\n",
- " 2f2: 23f0 movs r3, #240 ; 0xf0\n",
- " 2f4: 23f2 movs r3, #242 ; 0xf2\n",
- " 2f6: 23f4 movs r3, #244 ; 0xf4\n",
- " 2f8: 23f6 movs r3, #246 ; 0xf6\n",
- " 2fa: 23f8 movs r3, #248 ; 0xf8\n",
- " 2fc: 23fa movs r3, #250 ; 0xfa\n",
- " 2fe: 23fc movs r3, #252 ; 0xfc\n",
- " 300: 23fe movs r3, #254 ; 0xfe\n",
- " 302: 2300 movs r3, #0\n",
- " 304: 2302 movs r3, #2\n",
- " 306: 2304 movs r3, #4\n",
- " 308: 2306 movs r3, #6\n",
- " 30a: 2308 movs r3, #8\n",
- " 30c: 230a movs r3, #10\n",
- " 30e: 230c movs r3, #12\n",
- " 310: 230e movs r3, #14\n",
- " 312: 2310 movs r3, #16\n",
- " 314: 2312 movs r3, #18\n",
- " 316: 2314 movs r3, #20\n",
- " 318: 2316 movs r3, #22\n",
- " 31a: 2318 movs r3, #24\n",
- " 31c: 231a movs r3, #26\n",
- " 31e: 231c movs r3, #28\n",
- " 320: 231e movs r3, #30\n",
- " 322: 2320 movs r3, #32\n",
- " 324: 2322 movs r3, #34 ; 0x22\n",
- " 326: 2324 movs r3, #36 ; 0x24\n",
- " 328: 2326 movs r3, #38 ; 0x26\n",
- " 32a: 2328 movs r3, #40 ; 0x28\n",
- " 32c: 232a movs r3, #42 ; 0x2a\n",
- " 32e: 232c movs r3, #44 ; 0x2c\n",
- " 330: 232e movs r3, #46 ; 0x2e\n",
- " 332: 2330 movs r3, #48 ; 0x30\n",
- " 334: 2332 movs r3, #50 ; 0x32\n",
- " 336: 2334 movs r3, #52 ; 0x34\n",
- " 338: 2336 movs r3, #54 ; 0x36\n",
- " 33a: 2338 movs r3, #56 ; 0x38\n",
- " 33c: 233a movs r3, #58 ; 0x3a\n",
- " 33e: 233c movs r3, #60 ; 0x3c\n",
- " 340: 233e movs r3, #62 ; 0x3e\n",
- " 342: 2340 movs r3, #64 ; 0x40\n",
- " 344: 2342 movs r3, #66 ; 0x42\n",
- " 346: 2344 movs r3, #68 ; 0x44\n",
- " 348: 2346 movs r3, #70 ; 0x46\n",
- " 34a: 2348 movs r3, #72 ; 0x48\n",
- " 34c: 234a movs r3, #74 ; 0x4a\n",
- " 34e: 234c movs r3, #76 ; 0x4c\n",
- " 350: 234e movs r3, #78 ; 0x4e\n",
- " 352: 2350 movs r3, #80 ; 0x50\n",
- " 354: 2352 movs r3, #82 ; 0x52\n",
- " 356: 2354 movs r3, #84 ; 0x54\n",
- " 358: 2356 movs r3, #86 ; 0x56\n",
- " 35a: 2358 movs r3, #88 ; 0x58\n",
- " 35c: 235a movs r3, #90 ; 0x5a\n",
- " 35e: 235c movs r3, #92 ; 0x5c\n",
- " 360: 235e movs r3, #94 ; 0x5e\n",
- " 362: 2360 movs r3, #96 ; 0x60\n",
- " 364: 2362 movs r3, #98 ; 0x62\n",
- " 366: 2364 movs r3, #100 ; 0x64\n",
- " 368: 2366 movs r3, #102 ; 0x66\n",
- " 36a: 2368 movs r3, #104 ; 0x68\n",
- " 36c: 236a movs r3, #106 ; 0x6a\n",
- " 36e: 236c movs r3, #108 ; 0x6c\n",
- " 370: 236e movs r3, #110 ; 0x6e\n",
- " 372: 2370 movs r3, #112 ; 0x70\n",
- " 374: 2372 movs r3, #114 ; 0x72\n",
- " 376: 2374 movs r3, #116 ; 0x74\n",
- " 378: 2376 movs r3, #118 ; 0x76\n",
- " 37a: 2378 movs r3, #120 ; 0x78\n",
- " 37c: 237a movs r3, #122 ; 0x7a\n",
- " 37e: 237c movs r3, #124 ; 0x7c\n",
- " 380: 237e movs r3, #126 ; 0x7e\n",
- " 382: 2380 movs r3, #128 ; 0x80\n",
- " 384: 2382 movs r3, #130 ; 0x82\n",
- " 386: 2384 movs r3, #132 ; 0x84\n",
- " 388: 2386 movs r3, #134 ; 0x86\n",
- " 38a: 2388 movs r3, #136 ; 0x88\n",
- " 38c: 238a movs r3, #138 ; 0x8a\n",
- " 38e: 238c movs r3, #140 ; 0x8c\n",
- " 390: 238e movs r3, #142 ; 0x8e\n",
- " 392: 2390 movs r3, #144 ; 0x90\n",
- " 394: 2392 movs r3, #146 ; 0x92\n",
- " 396: 2394 movs r3, #148 ; 0x94\n",
- " 398: 2396 movs r3, #150 ; 0x96\n",
- " 39a: 2398 movs r3, #152 ; 0x98\n",
- " 39c: 239a movs r3, #154 ; 0x9a\n",
- " 39e: 239c movs r3, #156 ; 0x9c\n",
- " 3a0: 239e movs r3, #158 ; 0x9e\n",
- " 3a2: 23a0 movs r3, #160 ; 0xa0\n",
- " 3a4: 23a2 movs r3, #162 ; 0xa2\n",
- " 3a6: 23a4 movs r3, #164 ; 0xa4\n",
- " 3a8: 23a6 movs r3, #166 ; 0xa6\n",
- " 3aa: 23a8 movs r3, #168 ; 0xa8\n",
- " 3ac: 23aa movs r3, #170 ; 0xaa\n",
- " 3ae: 23ac movs r3, #172 ; 0xac\n",
- " 3b0: 23ae movs r3, #174 ; 0xae\n",
- " 3b2: 23b0 movs r3, #176 ; 0xb0\n",
- " 3b4: 23b2 movs r3, #178 ; 0xb2\n",
- " 3b6: 23b4 movs r3, #180 ; 0xb4\n",
- " 3b8: 23b6 movs r3, #182 ; 0xb6\n",
- " 3ba: 23b8 movs r3, #184 ; 0xb8\n",
- " 3bc: 23ba movs r3, #186 ; 0xba\n",
- " 3be: 23bc movs r3, #188 ; 0xbc\n",
- " 3c0: 23be movs r3, #190 ; 0xbe\n",
- " 3c2: 23c0 movs r3, #192 ; 0xc0\n",
- " 3c4: 23c2 movs r3, #194 ; 0xc2\n",
- " 3c6: 23c4 movs r3, #196 ; 0xc4\n",
- " 3c8: 23c6 movs r3, #198 ; 0xc6\n",
- " 3ca: 23c8 movs r3, #200 ; 0xc8\n",
- " 3cc: 23ca movs r3, #202 ; 0xca\n",
- " 3ce: 23cc movs r3, #204 ; 0xcc\n",
- " 3d0: 23ce movs r3, #206 ; 0xce\n",
- " 3d2: 23d0 movs r3, #208 ; 0xd0\n",
- " 3d4: 23d2 movs r3, #210 ; 0xd2\n",
- " 3d6: 23d4 movs r3, #212 ; 0xd4\n",
- " 3d8: 23d6 movs r3, #214 ; 0xd6\n",
- " 3da: 23d8 movs r3, #216 ; 0xd8\n",
- " 3dc: 23da movs r3, #218 ; 0xda\n",
- " 3de: 23dc movs r3, #220 ; 0xdc\n",
- " 3e0: 23de movs r3, #222 ; 0xde\n",
- " 3e2: 23e0 movs r3, #224 ; 0xe0\n",
- " 3e4: 23e2 movs r3, #226 ; 0xe2\n",
- " 3e6: 23e4 movs r3, #228 ; 0xe4\n",
- " 3e8: 23e6 movs r3, #230 ; 0xe6\n",
- " 3ea: 23e8 movs r3, #232 ; 0xe8\n",
- " 3ec: 23ea movs r3, #234 ; 0xea\n",
- " 3ee: 23ec movs r3, #236 ; 0xec\n",
- " 3f0: 23ee movs r3, #238 ; 0xee\n",
- " 3f2: 23f0 movs r3, #240 ; 0xf0\n",
- " 3f4: 23f2 movs r3, #242 ; 0xf2\n",
- " 3f6: 23f4 movs r3, #244 ; 0xf4\n",
- " 3f8: 23f6 movs r3, #246 ; 0xf6\n",
- " 3fa: 23f8 movs r3, #248 ; 0xf8\n",
- " 3fc: 23fa movs r3, #250 ; 0xfa\n",
- " 3fe: 23fc movs r3, #252 ; 0xfc\n",
- " 400: 23fe movs r3, #254 ; 0xfe\n",
- " 402: 2300 movs r3, #0\n",
- " 404: 2302 movs r3, #2\n",
- " 406: 2304 movs r3, #4\n",
- " 408: 2306 movs r3, #6\n",
- " 40a: 2308 movs r3, #8\n",
- " 40c: 230a movs r3, #10\n",
- " 40e: 230c movs r3, #12\n",
- " 410: 230e movs r3, #14\n",
- " 412: 2310 movs r3, #16\n",
- " 414: 2312 movs r3, #18\n",
- " 416: 2314 movs r3, #20\n",
- " 418: 2316 movs r3, #22\n",
- " 41a: 2318 movs r3, #24\n",
- " 41c: 231a movs r3, #26\n",
- " 41e: 231c movs r3, #28\n",
- " 420: 231e movs r3, #30\n",
- " 422: 2320 movs r3, #32\n",
- " 424: 2322 movs r3, #34 ; 0x22\n",
- " 426: 2324 movs r3, #36 ; 0x24\n",
- " 428: 2326 movs r3, #38 ; 0x26\n",
- " 42a: 2328 movs r3, #40 ; 0x28\n",
- " 42c: 232a movs r3, #42 ; 0x2a\n",
- " 42e: 232c movs r3, #44 ; 0x2c\n",
- " 430: 232e movs r3, #46 ; 0x2e\n",
- " 432: 2330 movs r3, #48 ; 0x30\n",
- " 434: 2332 movs r3, #50 ; 0x32\n",
- " 436: 2334 movs r3, #52 ; 0x34\n",
- " 438: 2336 movs r3, #54 ; 0x36\n",
- " 43a: 2338 movs r3, #56 ; 0x38\n",
- " 43c: 233a movs r3, #58 ; 0x3a\n",
- " 43e: 233c movs r3, #60 ; 0x3c\n",
- " 440: 233e movs r3, #62 ; 0x3e\n",
- " 442: 2340 movs r3, #64 ; 0x40\n",
- " 444: 2342 movs r3, #66 ; 0x42\n",
- " 446: 2344 movs r3, #68 ; 0x44\n",
- " 448: 2346 movs r3, #70 ; 0x46\n",
- " 44a: 2348 movs r3, #72 ; 0x48\n",
- " 44c: 234a movs r3, #74 ; 0x4a\n",
- " 44e: 234c movs r3, #76 ; 0x4c\n",
- " 450: 234e movs r3, #78 ; 0x4e\n",
- " 452: 2350 movs r3, #80 ; 0x50\n",
- " 454: 2352 movs r3, #82 ; 0x52\n",
- " 456: 2354 movs r3, #84 ; 0x54\n",
- " 458: 2356 movs r3, #86 ; 0x56\n",
- " 45a: 2358 movs r3, #88 ; 0x58\n",
- " 45c: 235a movs r3, #90 ; 0x5a\n",
- " 45e: 235c movs r3, #92 ; 0x5c\n",
- " 460: 235e movs r3, #94 ; 0x5e\n",
- " 462: 2360 movs r3, #96 ; 0x60\n",
- " 464: 2362 movs r3, #98 ; 0x62\n",
- " 466: 2364 movs r3, #100 ; 0x64\n",
- " 468: 2366 movs r3, #102 ; 0x66\n",
- " 46a: 2368 movs r3, #104 ; 0x68\n",
- " 46c: 236a movs r3, #106 ; 0x6a\n",
- " 46e: 236c movs r3, #108 ; 0x6c\n",
- " 470: 236e movs r3, #110 ; 0x6e\n",
- " 472: 2370 movs r3, #112 ; 0x70\n",
- " 474: 2372 movs r3, #114 ; 0x72\n",
- " 476: 2374 movs r3, #116 ; 0x74\n",
- " 478: 2376 movs r3, #118 ; 0x76\n",
- " 47a: 2378 movs r3, #120 ; 0x78\n",
- " 47c: 237a movs r3, #122 ; 0x7a\n",
- " 47e: 237c movs r3, #124 ; 0x7c\n",
- " 480: 237e movs r3, #126 ; 0x7e\n",
- " 482: 2380 movs r3, #128 ; 0x80\n",
- " 484: 2382 movs r3, #130 ; 0x82\n",
- " 486: 2384 movs r3, #132 ; 0x84\n",
- " 488: 2386 movs r3, #134 ; 0x86\n",
- " 48a: 2388 movs r3, #136 ; 0x88\n",
- " 48c: 238a movs r3, #138 ; 0x8a\n",
- " 48e: 238c movs r3, #140 ; 0x8c\n",
- " 490: 238e movs r3, #142 ; 0x8e\n",
- " 492: 2390 movs r3, #144 ; 0x90\n",
- " 494: 2392 movs r3, #146 ; 0x92\n",
- " 496: 2394 movs r3, #148 ; 0x94\n",
- " 498: 2396 movs r3, #150 ; 0x96\n",
- " 49a: 2398 movs r3, #152 ; 0x98\n",
- " 49c: 239a movs r3, #154 ; 0x9a\n",
- " 49e: 239c movs r3, #156 ; 0x9c\n",
- " 4a0: 239e movs r3, #158 ; 0x9e\n",
- " 4a2: 23a0 movs r3, #160 ; 0xa0\n",
- " 4a4: 23a2 movs r3, #162 ; 0xa2\n",
- " 4a6: 23a4 movs r3, #164 ; 0xa4\n",
- " 4a8: 23a6 movs r3, #166 ; 0xa6\n",
- " 4aa: 23a8 movs r3, #168 ; 0xa8\n",
- " 4ac: 23aa movs r3, #170 ; 0xaa\n",
- " 4ae: 23ac movs r3, #172 ; 0xac\n",
- " 4b0: 23ae movs r3, #174 ; 0xae\n",
- " 4b2: 23b0 movs r3, #176 ; 0xb0\n",
- " 4b4: 23b2 movs r3, #178 ; 0xb2\n",
- " 4b6: 23b4 movs r3, #180 ; 0xb4\n",
- " 4b8: 23b6 movs r3, #182 ; 0xb6\n",
- " 4ba: 23b8 movs r3, #184 ; 0xb8\n",
- " 4bc: 23ba movs r3, #186 ; 0xba\n",
- " 4be: 23bc movs r3, #188 ; 0xbc\n",
- " 4c0: 23be movs r3, #190 ; 0xbe\n",
- " 4c2: 23c0 movs r3, #192 ; 0xc0\n",
- " 4c4: 23c2 movs r3, #194 ; 0xc2\n",
- " 4c6: 23c4 movs r3, #196 ; 0xc4\n",
- " 4c8: 23c6 movs r3, #198 ; 0xc6\n",
- " 4ca: 23c8 movs r3, #200 ; 0xc8\n",
- " 4cc: 23ca movs r3, #202 ; 0xca\n",
- " 4ce: 23cc movs r3, #204 ; 0xcc\n",
- " 4d0: 23ce movs r3, #206 ; 0xce\n",
- " 4d2: 23d0 movs r3, #208 ; 0xd0\n",
- " 4d4: 23d2 movs r3, #210 ; 0xd2\n",
- " 4d6: 23d4 movs r3, #212 ; 0xd4\n",
- " 4d8: 23d6 movs r3, #214 ; 0xd6\n",
- " 4da: 23d8 movs r3, #216 ; 0xd8\n",
- " 4dc: 23da movs r3, #218 ; 0xda\n",
- " 4de: 23dc movs r3, #220 ; 0xdc\n",
- " 4e0: 23de movs r3, #222 ; 0xde\n",
- " 4e2: 23e0 movs r3, #224 ; 0xe0\n",
- " 4e4: 23e2 movs r3, #226 ; 0xe2\n",
- " 4e6: 23e4 movs r3, #228 ; 0xe4\n",
- " 4e8: 23e6 movs r3, #230 ; 0xe6\n",
- " 4ea: 23e8 movs r3, #232 ; 0xe8\n",
- " 4ec: 23ea movs r3, #234 ; 0xea\n",
- " 4ee: 23ec movs r3, #236 ; 0xec\n",
- " 4f0: 23ee movs r3, #238 ; 0xee\n",
- " 4f2: 23f0 movs r3, #240 ; 0xf0\n",
- " 4f4: 23f2 movs r3, #242 ; 0xf2\n",
- " 4f6: 23f4 movs r3, #244 ; 0xf4\n",
- " 4f8: 23f6 movs r3, #246 ; 0xf6\n",
- " 4fa: 23f8 movs r3, #248 ; 0xf8\n",
- " 4fc: 23fa movs r3, #250 ; 0xfa\n",
- " 4fe: 23fc movs r3, #252 ; 0xfc\n",
- " 500: 23fe movs r3, #254 ; 0xfe\n",
- " 502: 2300 movs r3, #0\n",
- " 504: 2302 movs r3, #2\n",
- " 506: 2304 movs r3, #4\n",
- " 508: 2306 movs r3, #6\n",
- " 50a: 2308 movs r3, #8\n",
- " 50c: 230a movs r3, #10\n",
- " 50e: 230c movs r3, #12\n",
- " 510: 230e movs r3, #14\n",
- " 512: 2310 movs r3, #16\n",
- " 514: 2312 movs r3, #18\n",
- " 516: 2314 movs r3, #20\n",
- " 518: 2316 movs r3, #22\n",
- " 51a: 2318 movs r3, #24\n",
- " 51c: 231a movs r3, #26\n",
- " 51e: 231c movs r3, #28\n",
- " 520: 231e movs r3, #30\n",
- " 522: 2320 movs r3, #32\n",
- " 524: 2322 movs r3, #34 ; 0x22\n",
- " 526: 2324 movs r3, #36 ; 0x24\n",
- " 528: 2326 movs r3, #38 ; 0x26\n",
- " 52a: 2328 movs r3, #40 ; 0x28\n",
- " 52c: 232a movs r3, #42 ; 0x2a\n",
- " 52e: 232c movs r3, #44 ; 0x2c\n",
- " 530: 232e movs r3, #46 ; 0x2e\n",
- " 532: 2330 movs r3, #48 ; 0x30\n",
- " 534: 2332 movs r3, #50 ; 0x32\n",
- " 536: 2334 movs r3, #52 ; 0x34\n",
- " 538: 2336 movs r3, #54 ; 0x36\n",
- " 53a: 2338 movs r3, #56 ; 0x38\n",
- " 53c: 233a movs r3, #58 ; 0x3a\n",
- " 53e: 233c movs r3, #60 ; 0x3c\n",
- " 540: 233e movs r3, #62 ; 0x3e\n",
- " 542: 2340 movs r3, #64 ; 0x40\n",
- " 544: 2342 movs r3, #66 ; 0x42\n",
- " 546: 2344 movs r3, #68 ; 0x44\n",
- " 548: 2346 movs r3, #70 ; 0x46\n",
- " 54a: 2348 movs r3, #72 ; 0x48\n",
- " 54c: 234a movs r3, #74 ; 0x4a\n",
- " 54e: 234c movs r3, #76 ; 0x4c\n",
- " 550: 234e movs r3, #78 ; 0x4e\n",
- " 552: 2350 movs r3, #80 ; 0x50\n",
- " 554: 2352 movs r3, #82 ; 0x52\n",
- " 556: 2354 movs r3, #84 ; 0x54\n",
- " 558: 2356 movs r3, #86 ; 0x56\n",
- " 55a: 2358 movs r3, #88 ; 0x58\n",
- " 55c: 235a movs r3, #90 ; 0x5a\n",
- " 55e: 235c movs r3, #92 ; 0x5c\n",
- " 560: 235e movs r3, #94 ; 0x5e\n",
- " 562: 2360 movs r3, #96 ; 0x60\n",
- " 564: 2362 movs r3, #98 ; 0x62\n",
- " 566: 2364 movs r3, #100 ; 0x64\n",
- " 568: 2366 movs r3, #102 ; 0x66\n",
- " 56a: 2368 movs r3, #104 ; 0x68\n",
- " 56c: 236a movs r3, #106 ; 0x6a\n",
- " 56e: 236c movs r3, #108 ; 0x6c\n",
- " 570: 236e movs r3, #110 ; 0x6e\n",
- " 572: 2370 movs r3, #112 ; 0x70\n",
- " 574: 2372 movs r3, #114 ; 0x72\n",
- " 576: 2374 movs r3, #116 ; 0x74\n",
- " 578: 2376 movs r3, #118 ; 0x76\n",
- " 57a: 2378 movs r3, #120 ; 0x78\n",
- " 57c: 237a movs r3, #122 ; 0x7a\n",
- " 57e: 237c movs r3, #124 ; 0x7c\n",
- " 580: 237e movs r3, #126 ; 0x7e\n",
- " 582: 2380 movs r3, #128 ; 0x80\n",
- " 584: 2382 movs r3, #130 ; 0x82\n",
- " 586: 2384 movs r3, #132 ; 0x84\n",
- " 588: 2386 movs r3, #134 ; 0x86\n",
- " 58a: 2388 movs r3, #136 ; 0x88\n",
- " 58c: 238a movs r3, #138 ; 0x8a\n",
- " 58e: 238c movs r3, #140 ; 0x8c\n",
- " 590: 238e movs r3, #142 ; 0x8e\n",
- " 592: 2390 movs r3, #144 ; 0x90\n",
- " 594: 2392 movs r3, #146 ; 0x92\n",
- " 596: 2394 movs r3, #148 ; 0x94\n",
- " 598: 2396 movs r3, #150 ; 0x96\n",
- " 59a: 2398 movs r3, #152 ; 0x98\n",
- " 59c: 239a movs r3, #154 ; 0x9a\n",
- " 59e: 239c movs r3, #156 ; 0x9c\n",
- " 5a0: 239e movs r3, #158 ; 0x9e\n",
- " 5a2: 23a0 movs r3, #160 ; 0xa0\n",
- " 5a4: 23a2 movs r3, #162 ; 0xa2\n",
- " 5a6: 23a4 movs r3, #164 ; 0xa4\n",
- " 5a8: 23a6 movs r3, #166 ; 0xa6\n",
- " 5aa: 23a8 movs r3, #168 ; 0xa8\n",
- " 5ac: 23aa movs r3, #170 ; 0xaa\n",
- " 5ae: 23ac movs r3, #172 ; 0xac\n",
- " 5b0: 23ae movs r3, #174 ; 0xae\n",
- " 5b2: 23b0 movs r3, #176 ; 0xb0\n",
- " 5b4: 23b2 movs r3, #178 ; 0xb2\n",
- " 5b6: 23b4 movs r3, #180 ; 0xb4\n",
- " 5b8: 23b6 movs r3, #182 ; 0xb6\n",
- " 5ba: 23b8 movs r3, #184 ; 0xb8\n",
- " 5bc: 23ba movs r3, #186 ; 0xba\n",
- " 5be: 23bc movs r3, #188 ; 0xbc\n",
- " 5c0: 23be movs r3, #190 ; 0xbe\n",
- " 5c2: 23c0 movs r3, #192 ; 0xc0\n",
- " 5c4: 23c2 movs r3, #194 ; 0xc2\n",
- " 5c6: 23c4 movs r3, #196 ; 0xc4\n",
- " 5c8: 23c6 movs r3, #198 ; 0xc6\n",
- " 5ca: 23c8 movs r3, #200 ; 0xc8\n",
- " 5cc: 23ca movs r3, #202 ; 0xca\n",
- " 5ce: 23cc movs r3, #204 ; 0xcc\n",
- " 5d0: 23ce movs r3, #206 ; 0xce\n",
- " 5d2: 23d0 movs r3, #208 ; 0xd0\n",
- " 5d4: 23d2 movs r3, #210 ; 0xd2\n",
- " 5d6: 23d4 movs r3, #212 ; 0xd4\n",
- " 5d8: 23d6 movs r3, #214 ; 0xd6\n",
- " 5da: 23d8 movs r3, #216 ; 0xd8\n",
- " 5dc: 23da movs r3, #218 ; 0xda\n",
- " 5de: 23dc movs r3, #220 ; 0xdc\n",
- " 5e0: 23de movs r3, #222 ; 0xde\n",
- " 5e2: 23e0 movs r3, #224 ; 0xe0\n",
- " 5e4: 23e2 movs r3, #226 ; 0xe2\n",
- " 5e6: 23e4 movs r3, #228 ; 0xe4\n",
- " 5e8: 23e6 movs r3, #230 ; 0xe6\n",
- " 5ea: 23e8 movs r3, #232 ; 0xe8\n",
- " 5ec: 23ea movs r3, #234 ; 0xea\n",
- " 5ee: 23ec movs r3, #236 ; 0xec\n",
- " 5f0: 23ee movs r3, #238 ; 0xee\n",
- " 5f2: 23f0 movs r3, #240 ; 0xf0\n",
- " 5f4: 23f2 movs r3, #242 ; 0xf2\n",
- " 5f6: 23f4 movs r3, #244 ; 0xf4\n",
- " 5f8: 23f6 movs r3, #246 ; 0xf6\n",
- " 5fa: 23f8 movs r3, #248 ; 0xf8\n",
- " 5fc: 23fa movs r3, #250 ; 0xfa\n",
- " 5fe: 23fc movs r3, #252 ; 0xfc\n",
- " 600: 23fe movs r3, #254 ; 0xfe\n",
- " 602: 2300 movs r3, #0\n",
- " 604: 2302 movs r3, #2\n",
- " 606: 2304 movs r3, #4\n",
- " 608: 2306 movs r3, #6\n",
- " 60a: 2308 movs r3, #8\n",
- " 60c: 230a movs r3, #10\n",
- " 60e: 230c movs r3, #12\n",
- " 610: 230e movs r3, #14\n",
- " 612: 2310 movs r3, #16\n",
- " 614: 2312 movs r3, #18\n",
- " 616: 2314 movs r3, #20\n",
- " 618: 2316 movs r3, #22\n",
- " 61a: 2318 movs r3, #24\n",
- " 61c: 231a movs r3, #26\n",
- " 61e: 231c movs r3, #28\n",
- " 620: 231e movs r3, #30\n",
- " 622: 2320 movs r3, #32\n",
- " 624: 2322 movs r3, #34 ; 0x22\n",
- " 626: 2324 movs r3, #36 ; 0x24\n",
- " 628: 2326 movs r3, #38 ; 0x26\n",
- " 62a: 2328 movs r3, #40 ; 0x28\n",
- " 62c: 232a movs r3, #42 ; 0x2a\n",
- " 62e: 232c movs r3, #44 ; 0x2c\n",
- " 630: 232e movs r3, #46 ; 0x2e\n",
- " 632: 2330 movs r3, #48 ; 0x30\n",
- " 634: 2332 movs r3, #50 ; 0x32\n",
- " 636: 2334 movs r3, #52 ; 0x34\n",
- " 638: 2336 movs r3, #54 ; 0x36\n",
- " 63a: 2338 movs r3, #56 ; 0x38\n",
- " 63c: 233a movs r3, #58 ; 0x3a\n",
- " 63e: 233c movs r3, #60 ; 0x3c\n",
- " 640: 233e movs r3, #62 ; 0x3e\n",
- " 642: 2340 movs r3, #64 ; 0x40\n",
- " 644: 2342 movs r3, #66 ; 0x42\n",
- " 646: 2344 movs r3, #68 ; 0x44\n",
- " 648: 2346 movs r3, #70 ; 0x46\n",
- " 64a: 2348 movs r3, #72 ; 0x48\n",
- " 64c: 234a movs r3, #74 ; 0x4a\n",
- " 64e: 234c movs r3, #76 ; 0x4c\n",
- " 650: 234e movs r3, #78 ; 0x4e\n",
- " 652: 2350 movs r3, #80 ; 0x50\n",
- " 654: 2352 movs r3, #82 ; 0x52\n",
- " 656: 2354 movs r3, #84 ; 0x54\n",
- " 658: 2356 movs r3, #86 ; 0x56\n",
- " 65a: 2358 movs r3, #88 ; 0x58\n",
- " 65c: 235a movs r3, #90 ; 0x5a\n",
- " 65e: 235c movs r3, #92 ; 0x5c\n",
- " 660: 235e movs r3, #94 ; 0x5e\n",
- " 662: 2360 movs r3, #96 ; 0x60\n",
- " 664: 2362 movs r3, #98 ; 0x62\n",
- " 666: 2364 movs r3, #100 ; 0x64\n",
- " 668: 2366 movs r3, #102 ; 0x66\n",
- " 66a: 2368 movs r3, #104 ; 0x68\n",
- " 66c: 236a movs r3, #106 ; 0x6a\n",
- " 66e: 236c movs r3, #108 ; 0x6c\n",
- " 670: 236e movs r3, #110 ; 0x6e\n",
- " 672: 2370 movs r3, #112 ; 0x70\n",
- " 674: 2372 movs r3, #114 ; 0x72\n",
- " 676: 2374 movs r3, #116 ; 0x74\n",
- " 678: 2376 movs r3, #118 ; 0x76\n",
- " 67a: 2378 movs r3, #120 ; 0x78\n",
- " 67c: 237a movs r3, #122 ; 0x7a\n",
- " 67e: 237c movs r3, #124 ; 0x7c\n",
- " 680: 237e movs r3, #126 ; 0x7e\n",
- " 682: 2380 movs r3, #128 ; 0x80\n",
- " 684: 2382 movs r3, #130 ; 0x82\n",
- " 686: 2384 movs r3, #132 ; 0x84\n",
- " 688: 2386 movs r3, #134 ; 0x86\n",
- " 68a: 2388 movs r3, #136 ; 0x88\n",
- " 68c: 238a movs r3, #138 ; 0x8a\n",
- " 68e: 238c movs r3, #140 ; 0x8c\n",
- " 690: 238e movs r3, #142 ; 0x8e\n",
- " 692: 2390 movs r3, #144 ; 0x90\n",
- " 694: 2392 movs r3, #146 ; 0x92\n",
- " 696: 2394 movs r3, #148 ; 0x94\n",
- " 698: 2396 movs r3, #150 ; 0x96\n",
- " 69a: 2398 movs r3, #152 ; 0x98\n",
- " 69c: 239a movs r3, #154 ; 0x9a\n",
- " 69e: 239c movs r3, #156 ; 0x9c\n",
- " 6a0: 239e movs r3, #158 ; 0x9e\n",
- " 6a2: 23a0 movs r3, #160 ; 0xa0\n",
- " 6a4: 23a2 movs r3, #162 ; 0xa2\n",
- " 6a6: 23a4 movs r3, #164 ; 0xa4\n",
- " 6a8: 23a6 movs r3, #166 ; 0xa6\n",
- " 6aa: 23a8 movs r3, #168 ; 0xa8\n",
- " 6ac: 23aa movs r3, #170 ; 0xaa\n",
- " 6ae: 23ac movs r3, #172 ; 0xac\n",
- " 6b0: 23ae movs r3, #174 ; 0xae\n",
- " 6b2: 23b0 movs r3, #176 ; 0xb0\n",
- " 6b4: 23b2 movs r3, #178 ; 0xb2\n",
- " 6b6: 23b4 movs r3, #180 ; 0xb4\n",
- " 6b8: 23b6 movs r3, #182 ; 0xb6\n",
- " 6ba: 23b8 movs r3, #184 ; 0xb8\n",
- " 6bc: 23ba movs r3, #186 ; 0xba\n",
- " 6be: 23bc movs r3, #188 ; 0xbc\n",
- " 6c0: 23be movs r3, #190 ; 0xbe\n",
- " 6c2: 23c0 movs r3, #192 ; 0xc0\n",
- " 6c4: 23c2 movs r3, #194 ; 0xc2\n",
- " 6c6: 23c4 movs r3, #196 ; 0xc4\n",
- " 6c8: 23c6 movs r3, #198 ; 0xc6\n",
- " 6ca: 23c8 movs r3, #200 ; 0xc8\n",
- " 6cc: 23ca movs r3, #202 ; 0xca\n",
- " 6ce: 23cc movs r3, #204 ; 0xcc\n",
- " 6d0: 23ce movs r3, #206 ; 0xce\n",
- " 6d2: 23d0 movs r3, #208 ; 0xd0\n",
- " 6d4: 23d2 movs r3, #210 ; 0xd2\n",
- " 6d6: 23d4 movs r3, #212 ; 0xd4\n",
- " 6d8: 23d6 movs r3, #214 ; 0xd6\n",
- " 6da: 23d8 movs r3, #216 ; 0xd8\n",
- " 6dc: 23da movs r3, #218 ; 0xda\n",
- " 6de: 23dc movs r3, #220 ; 0xdc\n",
- " 6e0: 23de movs r3, #222 ; 0xde\n",
- " 6e2: 23e0 movs r3, #224 ; 0xe0\n",
- " 6e4: 23e2 movs r3, #226 ; 0xe2\n",
- " 6e6: 23e4 movs r3, #228 ; 0xe4\n",
- " 6e8: 23e6 movs r3, #230 ; 0xe6\n",
- " 6ea: 23e8 movs r3, #232 ; 0xe8\n",
- " 6ec: 23ea movs r3, #234 ; 0xea\n",
- " 6ee: 23ec movs r3, #236 ; 0xec\n",
- " 6f0: 23ee movs r3, #238 ; 0xee\n",
- " 6f2: 23f0 movs r3, #240 ; 0xf0\n",
- " 6f4: 23f2 movs r3, #242 ; 0xf2\n",
- " 6f6: 23f4 movs r3, #244 ; 0xf4\n",
- " 6f8: 23f6 movs r3, #246 ; 0xf6\n",
- " 6fa: 23f8 movs r3, #248 ; 0xf8\n",
- " 6fc: 23fa movs r3, #250 ; 0xfa\n",
- " 6fe: 23fc movs r3, #252 ; 0xfc\n",
- " 700: 23fe movs r3, #254 ; 0xfe\n",
- " 702: 2300 movs r3, #0\n",
- " 704: 2302 movs r3, #2\n",
- " 706: 2304 movs r3, #4\n",
- " 708: 2306 movs r3, #6\n",
- " 70a: 2308 movs r3, #8\n",
- " 70c: 230a movs r3, #10\n",
- " 70e: 230c movs r3, #12\n",
- " 710: 230e movs r3, #14\n",
- " 712: 2310 movs r3, #16\n",
- " 714: 2312 movs r3, #18\n",
- " 716: 2314 movs r3, #20\n",
- " 718: 2316 movs r3, #22\n",
- " 71a: 2318 movs r3, #24\n",
- " 71c: 231a movs r3, #26\n",
- " 71e: 231c movs r3, #28\n",
- " 720: 231e movs r3, #30\n",
- " 722: 2320 movs r3, #32\n",
- " 724: 2322 movs r3, #34 ; 0x22\n",
- " 726: 2324 movs r3, #36 ; 0x24\n",
- " 728: 2326 movs r3, #38 ; 0x26\n",
- " 72a: 2328 movs r3, #40 ; 0x28\n",
- " 72c: 232a movs r3, #42 ; 0x2a\n",
- " 72e: 232c movs r3, #44 ; 0x2c\n",
- " 730: 232e movs r3, #46 ; 0x2e\n",
- " 732: 2330 movs r3, #48 ; 0x30\n",
- " 734: 2332 movs r3, #50 ; 0x32\n",
- " 736: 2334 movs r3, #52 ; 0x34\n",
- " 738: 2336 movs r3, #54 ; 0x36\n",
- " 73a: 2338 movs r3, #56 ; 0x38\n",
- " 73c: 233a movs r3, #58 ; 0x3a\n",
- " 73e: 233c movs r3, #60 ; 0x3c\n",
- " 740: 233e movs r3, #62 ; 0x3e\n",
- " 742: 2340 movs r3, #64 ; 0x40\n",
- " 744: 2342 movs r3, #66 ; 0x42\n",
- " 746: 2344 movs r3, #68 ; 0x44\n",
- " 748: 2346 movs r3, #70 ; 0x46\n",
- " 74a: 2348 movs r3, #72 ; 0x48\n",
- " 74c: 234a movs r3, #74 ; 0x4a\n",
- " 74e: 234c movs r3, #76 ; 0x4c\n",
- " 750: 234e movs r3, #78 ; 0x4e\n",
- " 752: 2350 movs r3, #80 ; 0x50\n",
- " 754: 2352 movs r3, #82 ; 0x52\n",
- " 756: 2354 movs r3, #84 ; 0x54\n",
- " 758: 2356 movs r3, #86 ; 0x56\n",
- " 75a: 2358 movs r3, #88 ; 0x58\n",
- " 75c: 235a movs r3, #90 ; 0x5a\n",
- " 75e: 235c movs r3, #92 ; 0x5c\n",
- " 760: 235e movs r3, #94 ; 0x5e\n",
- " 762: 2360 movs r3, #96 ; 0x60\n",
- " 764: 2362 movs r3, #98 ; 0x62\n",
- " 766: 2364 movs r3, #100 ; 0x64\n",
- " 768: 2366 movs r3, #102 ; 0x66\n",
- " 76a: 2368 movs r3, #104 ; 0x68\n",
- " 76c: 236a movs r3, #106 ; 0x6a\n",
- " 76e: 236c movs r3, #108 ; 0x6c\n",
- " 770: 236e movs r3, #110 ; 0x6e\n",
- " 772: 2370 movs r3, #112 ; 0x70\n",
- " 774: 2372 movs r3, #114 ; 0x72\n",
- " 776: 2374 movs r3, #116 ; 0x74\n",
- " 778: 2376 movs r3, #118 ; 0x76\n",
- " 77a: 2378 movs r3, #120 ; 0x78\n",
- " 77c: 237a movs r3, #122 ; 0x7a\n",
- " 77e: 237c movs r3, #124 ; 0x7c\n",
- " 780: 237e movs r3, #126 ; 0x7e\n",
- " 782: 2380 movs r3, #128 ; 0x80\n",
- " 784: 2382 movs r3, #130 ; 0x82\n",
- " 786: 2384 movs r3, #132 ; 0x84\n",
- " 788: 2386 movs r3, #134 ; 0x86\n",
- " 78a: 2388 movs r3, #136 ; 0x88\n",
- " 78c: 238a movs r3, #138 ; 0x8a\n",
- " 78e: 238c movs r3, #140 ; 0x8c\n",
- " 790: 238e movs r3, #142 ; 0x8e\n",
- " 792: 2390 movs r3, #144 ; 0x90\n",
- " 794: 2392 movs r3, #146 ; 0x92\n",
- " 796: 2394 movs r3, #148 ; 0x94\n",
- " 798: 2396 movs r3, #150 ; 0x96\n",
- " 79a: 2398 movs r3, #152 ; 0x98\n",
- " 79c: 239a movs r3, #154 ; 0x9a\n",
- " 79e: 239c movs r3, #156 ; 0x9c\n",
- " 7a0: 239e movs r3, #158 ; 0x9e\n",
- " 7a2: 23a0 movs r3, #160 ; 0xa0\n",
- " 7a4: 23a2 movs r3, #162 ; 0xa2\n",
- " 7a6: 23a4 movs r3, #164 ; 0xa4\n",
- " 7a8: 23a6 movs r3, #166 ; 0xa6\n",
- " 7aa: 23a8 movs r3, #168 ; 0xa8\n",
- " 7ac: 23aa movs r3, #170 ; 0xaa\n",
- " 7ae: 23ac movs r3, #172 ; 0xac\n",
- " 7b0: 23ae movs r3, #174 ; 0xae\n",
- " 7b2: 23b0 movs r3, #176 ; 0xb0\n",
- " 7b4: 23b2 movs r3, #178 ; 0xb2\n",
- " 7b6: 23b4 movs r3, #180 ; 0xb4\n",
- " 7b8: 23b6 movs r3, #182 ; 0xb6\n",
- " 7ba: 23b8 movs r3, #184 ; 0xb8\n",
- " 7bc: 23ba movs r3, #186 ; 0xba\n",
- " 7be: 23bc movs r3, #188 ; 0xbc\n",
- " 7c0: 23be movs r3, #190 ; 0xbe\n",
- " 7c2: 23c0 movs r3, #192 ; 0xc0\n",
- " 7c4: 23c2 movs r3, #194 ; 0xc2\n",
- " 7c6: 23c4 movs r3, #196 ; 0xc4\n",
- " 7c8: 23c6 movs r3, #198 ; 0xc6\n",
- " 7ca: 23c8 movs r3, #200 ; 0xc8\n",
- " 7cc: 23ca movs r3, #202 ; 0xca\n",
- " 7ce: 23cc movs r3, #204 ; 0xcc\n",
- " 7d0: 23ce movs r3, #206 ; 0xce\n",
- " 7d2: 23d0 movs r3, #208 ; 0xd0\n",
- " 7d4: 23d2 movs r3, #210 ; 0xd2\n",
- " 7d6: 23d4 movs r3, #212 ; 0xd4\n",
- " 7d8: 23d6 movs r3, #214 ; 0xd6\n",
- " 7da: 23d8 movs r3, #216 ; 0xd8\n",
- " 7dc: 23da movs r3, #218 ; 0xda\n",
- " 7de: 23dc movs r3, #220 ; 0xdc\n",
- " 7e0: 23de movs r3, #222 ; 0xde\n",
- " 7e2: 23e0 movs r3, #224 ; 0xe0\n",
- " 7e4: 23e2 movs r3, #226 ; 0xe2\n",
- " 7e6: 23e4 movs r3, #228 ; 0xe4\n",
- " 7e8: 23e6 movs r3, #230 ; 0xe6\n",
- " 7ea: 23e8 movs r3, #232 ; 0xe8\n",
- " 7ec: 23ea movs r3, #234 ; 0xea\n",
- " 7ee: 23ec movs r3, #236 ; 0xec\n",
- " 7f0: 23ee movs r3, #238 ; 0xee\n",
- " 7f2: 23f0 movs r3, #240 ; 0xf0\n",
- " 7f4: 23f2 movs r3, #242 ; 0xf2\n",
- " 7f6: 23f4 movs r3, #244 ; 0xf4\n",
- " 7f8: 23f6 movs r3, #246 ; 0xf6\n",
- " 7fa: 23f8 movs r3, #248 ; 0xf8\n",
- " 7fc: 23fa movs r3, #250 ; 0xfa\n",
- " 7fe: 23fc movs r3, #252 ; 0xfc\n",
- " 800: 23fe movs r3, #254 ; 0xfe\n",
- " 802: 4611 mov r1, r2\n",
- nullptr
-};
-const char* const Branch32Results[] = {
- " 0: f000 bc01 b.w 806 <Branch32+0x806>\n",
- " 4: 2300 movs r3, #0\n",
- " 6: 2302 movs r3, #2\n",
- " 8: 2304 movs r3, #4\n",
- " a: 2306 movs r3, #6\n",
- " c: 2308 movs r3, #8\n",
- " e: 230a movs r3, #10\n",
- " 10: 230c movs r3, #12\n",
- " 12: 230e movs r3, #14\n",
- " 14: 2310 movs r3, #16\n",
- " 16: 2312 movs r3, #18\n",
- " 18: 2314 movs r3, #20\n",
- " 1a: 2316 movs r3, #22\n",
- " 1c: 2318 movs r3, #24\n",
- " 1e: 231a movs r3, #26\n",
- " 20: 231c movs r3, #28\n",
- " 22: 231e movs r3, #30\n",
- " 24: 2320 movs r3, #32\n",
- " 26: 2322 movs r3, #34 ; 0x22\n",
- " 28: 2324 movs r3, #36 ; 0x24\n",
- " 2a: 2326 movs r3, #38 ; 0x26\n",
- " 2c: 2328 movs r3, #40 ; 0x28\n",
- " 2e: 232a movs r3, #42 ; 0x2a\n",
- " 30: 232c movs r3, #44 ; 0x2c\n",
- " 32: 232e movs r3, #46 ; 0x2e\n",
- " 34: 2330 movs r3, #48 ; 0x30\n",
- " 36: 2332 movs r3, #50 ; 0x32\n",
- " 38: 2334 movs r3, #52 ; 0x34\n",
- " 3a: 2336 movs r3, #54 ; 0x36\n",
- " 3c: 2338 movs r3, #56 ; 0x38\n",
- " 3e: 233a movs r3, #58 ; 0x3a\n",
- " 40: 233c movs r3, #60 ; 0x3c\n",
- " 42: 233e movs r3, #62 ; 0x3e\n",
- " 44: 2340 movs r3, #64 ; 0x40\n",
- " 46: 2342 movs r3, #66 ; 0x42\n",
- " 48: 2344 movs r3, #68 ; 0x44\n",
- " 4a: 2346 movs r3, #70 ; 0x46\n",
- " 4c: 2348 movs r3, #72 ; 0x48\n",
- " 4e: 234a movs r3, #74 ; 0x4a\n",
- " 50: 234c movs r3, #76 ; 0x4c\n",
- " 52: 234e movs r3, #78 ; 0x4e\n",
- " 54: 2350 movs r3, #80 ; 0x50\n",
- " 56: 2352 movs r3, #82 ; 0x52\n",
- " 58: 2354 movs r3, #84 ; 0x54\n",
- " 5a: 2356 movs r3, #86 ; 0x56\n",
- " 5c: 2358 movs r3, #88 ; 0x58\n",
- " 5e: 235a movs r3, #90 ; 0x5a\n",
- " 60: 235c movs r3, #92 ; 0x5c\n",
- " 62: 235e movs r3, #94 ; 0x5e\n",
- " 64: 2360 movs r3, #96 ; 0x60\n",
- " 66: 2362 movs r3, #98 ; 0x62\n",
- " 68: 2364 movs r3, #100 ; 0x64\n",
- " 6a: 2366 movs r3, #102 ; 0x66\n",
- " 6c: 2368 movs r3, #104 ; 0x68\n",
- " 6e: 236a movs r3, #106 ; 0x6a\n",
- " 70: 236c movs r3, #108 ; 0x6c\n",
- " 72: 236e movs r3, #110 ; 0x6e\n",
- " 74: 2370 movs r3, #112 ; 0x70\n",
- " 76: 2372 movs r3, #114 ; 0x72\n",
- " 78: 2374 movs r3, #116 ; 0x74\n",
- " 7a: 2376 movs r3, #118 ; 0x76\n",
- " 7c: 2378 movs r3, #120 ; 0x78\n",
- " 7e: 237a movs r3, #122 ; 0x7a\n",
- " 80: 237c movs r3, #124 ; 0x7c\n",
- " 82: 237e movs r3, #126 ; 0x7e\n",
- " 84: 2380 movs r3, #128 ; 0x80\n",
- " 86: 2382 movs r3, #130 ; 0x82\n",
- " 88: 2384 movs r3, #132 ; 0x84\n",
- " 8a: 2386 movs r3, #134 ; 0x86\n",
- " 8c: 2388 movs r3, #136 ; 0x88\n",
- " 8e: 238a movs r3, #138 ; 0x8a\n",
- " 90: 238c movs r3, #140 ; 0x8c\n",
- " 92: 238e movs r3, #142 ; 0x8e\n",
- " 94: 2390 movs r3, #144 ; 0x90\n",
- " 96: 2392 movs r3, #146 ; 0x92\n",
- " 98: 2394 movs r3, #148 ; 0x94\n",
- " 9a: 2396 movs r3, #150 ; 0x96\n",
- " 9c: 2398 movs r3, #152 ; 0x98\n",
- " 9e: 239a movs r3, #154 ; 0x9a\n",
- " a0: 239c movs r3, #156 ; 0x9c\n",
- " a2: 239e movs r3, #158 ; 0x9e\n",
- " a4: 23a0 movs r3, #160 ; 0xa0\n",
- " a6: 23a2 movs r3, #162 ; 0xa2\n",
- " a8: 23a4 movs r3, #164 ; 0xa4\n",
- " aa: 23a6 movs r3, #166 ; 0xa6\n",
- " ac: 23a8 movs r3, #168 ; 0xa8\n",
- " ae: 23aa movs r3, #170 ; 0xaa\n",
- " b0: 23ac movs r3, #172 ; 0xac\n",
- " b2: 23ae movs r3, #174 ; 0xae\n",
- " b4: 23b0 movs r3, #176 ; 0xb0\n",
- " b6: 23b2 movs r3, #178 ; 0xb2\n",
- " b8: 23b4 movs r3, #180 ; 0xb4\n",
- " ba: 23b6 movs r3, #182 ; 0xb6\n",
- " bc: 23b8 movs r3, #184 ; 0xb8\n",
- " be: 23ba movs r3, #186 ; 0xba\n",
- " c0: 23bc movs r3, #188 ; 0xbc\n",
- " c2: 23be movs r3, #190 ; 0xbe\n",
- " c4: 23c0 movs r3, #192 ; 0xc0\n",
- " c6: 23c2 movs r3, #194 ; 0xc2\n",
- " c8: 23c4 movs r3, #196 ; 0xc4\n",
- " ca: 23c6 movs r3, #198 ; 0xc6\n",
- " cc: 23c8 movs r3, #200 ; 0xc8\n",
- " ce: 23ca movs r3, #202 ; 0xca\n",
- " d0: 23cc movs r3, #204 ; 0xcc\n",
- " d2: 23ce movs r3, #206 ; 0xce\n",
- " d4: 23d0 movs r3, #208 ; 0xd0\n",
- " d6: 23d2 movs r3, #210 ; 0xd2\n",
- " d8: 23d4 movs r3, #212 ; 0xd4\n",
- " da: 23d6 movs r3, #214 ; 0xd6\n",
- " dc: 23d8 movs r3, #216 ; 0xd8\n",
- " de: 23da movs r3, #218 ; 0xda\n",
- " e0: 23dc movs r3, #220 ; 0xdc\n",
- " e2: 23de movs r3, #222 ; 0xde\n",
- " e4: 23e0 movs r3, #224 ; 0xe0\n",
- " e6: 23e2 movs r3, #226 ; 0xe2\n",
- " e8: 23e4 movs r3, #228 ; 0xe4\n",
- " ea: 23e6 movs r3, #230 ; 0xe6\n",
- " ec: 23e8 movs r3, #232 ; 0xe8\n",
- " ee: 23ea movs r3, #234 ; 0xea\n",
- " f0: 23ec movs r3, #236 ; 0xec\n",
- " f2: 23ee movs r3, #238 ; 0xee\n",
- " f4: 23f0 movs r3, #240 ; 0xf0\n",
- " f6: 23f2 movs r3, #242 ; 0xf2\n",
- " f8: 23f4 movs r3, #244 ; 0xf4\n",
- " fa: 23f6 movs r3, #246 ; 0xf6\n",
- " fc: 23f8 movs r3, #248 ; 0xf8\n",
- " fe: 23fa movs r3, #250 ; 0xfa\n",
- " 100: 23fc movs r3, #252 ; 0xfc\n",
- " 102: 23fe movs r3, #254 ; 0xfe\n",
- " 104: 2300 movs r3, #0\n",
- " 106: 2302 movs r3, #2\n",
- " 108: 2304 movs r3, #4\n",
- " 10a: 2306 movs r3, #6\n",
- " 10c: 2308 movs r3, #8\n",
- " 10e: 230a movs r3, #10\n",
- " 110: 230c movs r3, #12\n",
- " 112: 230e movs r3, #14\n",
- " 114: 2310 movs r3, #16\n",
- " 116: 2312 movs r3, #18\n",
- " 118: 2314 movs r3, #20\n",
- " 11a: 2316 movs r3, #22\n",
- " 11c: 2318 movs r3, #24\n",
- " 11e: 231a movs r3, #26\n",
- " 120: 231c movs r3, #28\n",
- " 122: 231e movs r3, #30\n",
- " 124: 2320 movs r3, #32\n",
- " 126: 2322 movs r3, #34 ; 0x22\n",
- " 128: 2324 movs r3, #36 ; 0x24\n",
- " 12a: 2326 movs r3, #38 ; 0x26\n",
- " 12c: 2328 movs r3, #40 ; 0x28\n",
- " 12e: 232a movs r3, #42 ; 0x2a\n",
- " 130: 232c movs r3, #44 ; 0x2c\n",
- " 132: 232e movs r3, #46 ; 0x2e\n",
- " 134: 2330 movs r3, #48 ; 0x30\n",
- " 136: 2332 movs r3, #50 ; 0x32\n",
- " 138: 2334 movs r3, #52 ; 0x34\n",
- " 13a: 2336 movs r3, #54 ; 0x36\n",
- " 13c: 2338 movs r3, #56 ; 0x38\n",
- " 13e: 233a movs r3, #58 ; 0x3a\n",
- " 140: 233c movs r3, #60 ; 0x3c\n",
- " 142: 233e movs r3, #62 ; 0x3e\n",
- " 144: 2340 movs r3, #64 ; 0x40\n",
- " 146: 2342 movs r3, #66 ; 0x42\n",
- " 148: 2344 movs r3, #68 ; 0x44\n",
- " 14a: 2346 movs r3, #70 ; 0x46\n",
- " 14c: 2348 movs r3, #72 ; 0x48\n",
- " 14e: 234a movs r3, #74 ; 0x4a\n",
- " 150: 234c movs r3, #76 ; 0x4c\n",
- " 152: 234e movs r3, #78 ; 0x4e\n",
- " 154: 2350 movs r3, #80 ; 0x50\n",
- " 156: 2352 movs r3, #82 ; 0x52\n",
- " 158: 2354 movs r3, #84 ; 0x54\n",
- " 15a: 2356 movs r3, #86 ; 0x56\n",
- " 15c: 2358 movs r3, #88 ; 0x58\n",
- " 15e: 235a movs r3, #90 ; 0x5a\n",
- " 160: 235c movs r3, #92 ; 0x5c\n",
- " 162: 235e movs r3, #94 ; 0x5e\n",
- " 164: 2360 movs r3, #96 ; 0x60\n",
- " 166: 2362 movs r3, #98 ; 0x62\n",
- " 168: 2364 movs r3, #100 ; 0x64\n",
- " 16a: 2366 movs r3, #102 ; 0x66\n",
- " 16c: 2368 movs r3, #104 ; 0x68\n",
- " 16e: 236a movs r3, #106 ; 0x6a\n",
- " 170: 236c movs r3, #108 ; 0x6c\n",
- " 172: 236e movs r3, #110 ; 0x6e\n",
- " 174: 2370 movs r3, #112 ; 0x70\n",
- " 176: 2372 movs r3, #114 ; 0x72\n",
- " 178: 2374 movs r3, #116 ; 0x74\n",
- " 17a: 2376 movs r3, #118 ; 0x76\n",
- " 17c: 2378 movs r3, #120 ; 0x78\n",
- " 17e: 237a movs r3, #122 ; 0x7a\n",
- " 180: 237c movs r3, #124 ; 0x7c\n",
- " 182: 237e movs r3, #126 ; 0x7e\n",
- " 184: 2380 movs r3, #128 ; 0x80\n",
- " 186: 2382 movs r3, #130 ; 0x82\n",
- " 188: 2384 movs r3, #132 ; 0x84\n",
- " 18a: 2386 movs r3, #134 ; 0x86\n",
- " 18c: 2388 movs r3, #136 ; 0x88\n",
- " 18e: 238a movs r3, #138 ; 0x8a\n",
- " 190: 238c movs r3, #140 ; 0x8c\n",
- " 192: 238e movs r3, #142 ; 0x8e\n",
- " 194: 2390 movs r3, #144 ; 0x90\n",
- " 196: 2392 movs r3, #146 ; 0x92\n",
- " 198: 2394 movs r3, #148 ; 0x94\n",
- " 19a: 2396 movs r3, #150 ; 0x96\n",
- " 19c: 2398 movs r3, #152 ; 0x98\n",
- " 19e: 239a movs r3, #154 ; 0x9a\n",
- " 1a0: 239c movs r3, #156 ; 0x9c\n",
- " 1a2: 239e movs r3, #158 ; 0x9e\n",
- " 1a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 1a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 1a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 1aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 1ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 1ae: 23aa movs r3, #170 ; 0xaa\n",
- " 1b0: 23ac movs r3, #172 ; 0xac\n",
- " 1b2: 23ae movs r3, #174 ; 0xae\n",
- " 1b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 1b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 1b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 1ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 1bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 1be: 23ba movs r3, #186 ; 0xba\n",
- " 1c0: 23bc movs r3, #188 ; 0xbc\n",
- " 1c2: 23be movs r3, #190 ; 0xbe\n",
- " 1c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 1c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 1c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 1ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 1cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 1ce: 23ca movs r3, #202 ; 0xca\n",
- " 1d0: 23cc movs r3, #204 ; 0xcc\n",
- " 1d2: 23ce movs r3, #206 ; 0xce\n",
- " 1d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 1d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 1d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 1da: 23d6 movs r3, #214 ; 0xd6\n",
- " 1dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 1de: 23da movs r3, #218 ; 0xda\n",
- " 1e0: 23dc movs r3, #220 ; 0xdc\n",
- " 1e2: 23de movs r3, #222 ; 0xde\n",
- " 1e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 1e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 1e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 1ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 1ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 1ee: 23ea movs r3, #234 ; 0xea\n",
- " 1f0: 23ec movs r3, #236 ; 0xec\n",
- " 1f2: 23ee movs r3, #238 ; 0xee\n",
- " 1f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 1f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 1f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 1fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 1fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 1fe: 23fa movs r3, #250 ; 0xfa\n",
- " 200: 23fc movs r3, #252 ; 0xfc\n",
- " 202: 23fe movs r3, #254 ; 0xfe\n",
- " 204: 2300 movs r3, #0\n",
- " 206: 2302 movs r3, #2\n",
- " 208: 2304 movs r3, #4\n",
- " 20a: 2306 movs r3, #6\n",
- " 20c: 2308 movs r3, #8\n",
- " 20e: 230a movs r3, #10\n",
- " 210: 230c movs r3, #12\n",
- " 212: 230e movs r3, #14\n",
- " 214: 2310 movs r3, #16\n",
- " 216: 2312 movs r3, #18\n",
- " 218: 2314 movs r3, #20\n",
- " 21a: 2316 movs r3, #22\n",
- " 21c: 2318 movs r3, #24\n",
- " 21e: 231a movs r3, #26\n",
- " 220: 231c movs r3, #28\n",
- " 222: 231e movs r3, #30\n",
- " 224: 2320 movs r3, #32\n",
- " 226: 2322 movs r3, #34 ; 0x22\n",
- " 228: 2324 movs r3, #36 ; 0x24\n",
- " 22a: 2326 movs r3, #38 ; 0x26\n",
- " 22c: 2328 movs r3, #40 ; 0x28\n",
- " 22e: 232a movs r3, #42 ; 0x2a\n",
- " 230: 232c movs r3, #44 ; 0x2c\n",
- " 232: 232e movs r3, #46 ; 0x2e\n",
- " 234: 2330 movs r3, #48 ; 0x30\n",
- " 236: 2332 movs r3, #50 ; 0x32\n",
- " 238: 2334 movs r3, #52 ; 0x34\n",
- " 23a: 2336 movs r3, #54 ; 0x36\n",
- " 23c: 2338 movs r3, #56 ; 0x38\n",
- " 23e: 233a movs r3, #58 ; 0x3a\n",
- " 240: 233c movs r3, #60 ; 0x3c\n",
- " 242: 233e movs r3, #62 ; 0x3e\n",
- " 244: 2340 movs r3, #64 ; 0x40\n",
- " 246: 2342 movs r3, #66 ; 0x42\n",
- " 248: 2344 movs r3, #68 ; 0x44\n",
- " 24a: 2346 movs r3, #70 ; 0x46\n",
- " 24c: 2348 movs r3, #72 ; 0x48\n",
- " 24e: 234a movs r3, #74 ; 0x4a\n",
- " 250: 234c movs r3, #76 ; 0x4c\n",
- " 252: 234e movs r3, #78 ; 0x4e\n",
- " 254: 2350 movs r3, #80 ; 0x50\n",
- " 256: 2352 movs r3, #82 ; 0x52\n",
- " 258: 2354 movs r3, #84 ; 0x54\n",
- " 25a: 2356 movs r3, #86 ; 0x56\n",
- " 25c: 2358 movs r3, #88 ; 0x58\n",
- " 25e: 235a movs r3, #90 ; 0x5a\n",
- " 260: 235c movs r3, #92 ; 0x5c\n",
- " 262: 235e movs r3, #94 ; 0x5e\n",
- " 264: 2360 movs r3, #96 ; 0x60\n",
- " 266: 2362 movs r3, #98 ; 0x62\n",
- " 268: 2364 movs r3, #100 ; 0x64\n",
- " 26a: 2366 movs r3, #102 ; 0x66\n",
- " 26c: 2368 movs r3, #104 ; 0x68\n",
- " 26e: 236a movs r3, #106 ; 0x6a\n",
- " 270: 236c movs r3, #108 ; 0x6c\n",
- " 272: 236e movs r3, #110 ; 0x6e\n",
- " 274: 2370 movs r3, #112 ; 0x70\n",
- " 276: 2372 movs r3, #114 ; 0x72\n",
- " 278: 2374 movs r3, #116 ; 0x74\n",
- " 27a: 2376 movs r3, #118 ; 0x76\n",
- " 27c: 2378 movs r3, #120 ; 0x78\n",
- " 27e: 237a movs r3, #122 ; 0x7a\n",
- " 280: 237c movs r3, #124 ; 0x7c\n",
- " 282: 237e movs r3, #126 ; 0x7e\n",
- " 284: 2380 movs r3, #128 ; 0x80\n",
- " 286: 2382 movs r3, #130 ; 0x82\n",
- " 288: 2384 movs r3, #132 ; 0x84\n",
- " 28a: 2386 movs r3, #134 ; 0x86\n",
- " 28c: 2388 movs r3, #136 ; 0x88\n",
- " 28e: 238a movs r3, #138 ; 0x8a\n",
- " 290: 238c movs r3, #140 ; 0x8c\n",
- " 292: 238e movs r3, #142 ; 0x8e\n",
- " 294: 2390 movs r3, #144 ; 0x90\n",
- " 296: 2392 movs r3, #146 ; 0x92\n",
- " 298: 2394 movs r3, #148 ; 0x94\n",
- " 29a: 2396 movs r3, #150 ; 0x96\n",
- " 29c: 2398 movs r3, #152 ; 0x98\n",
- " 29e: 239a movs r3, #154 ; 0x9a\n",
- " 2a0: 239c movs r3, #156 ; 0x9c\n",
- " 2a2: 239e movs r3, #158 ; 0x9e\n",
- " 2a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 2a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 2a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 2aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 2ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 2ae: 23aa movs r3, #170 ; 0xaa\n",
- " 2b0: 23ac movs r3, #172 ; 0xac\n",
- " 2b2: 23ae movs r3, #174 ; 0xae\n",
- " 2b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 2b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 2b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 2ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 2bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 2be: 23ba movs r3, #186 ; 0xba\n",
- " 2c0: 23bc movs r3, #188 ; 0xbc\n",
- " 2c2: 23be movs r3, #190 ; 0xbe\n",
- " 2c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 2c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 2c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 2ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 2cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 2ce: 23ca movs r3, #202 ; 0xca\n",
- " 2d0: 23cc movs r3, #204 ; 0xcc\n",
- " 2d2: 23ce movs r3, #206 ; 0xce\n",
- " 2d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 2d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 2d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 2da: 23d6 movs r3, #214 ; 0xd6\n",
- " 2dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 2de: 23da movs r3, #218 ; 0xda\n",
- " 2e0: 23dc movs r3, #220 ; 0xdc\n",
- " 2e2: 23de movs r3, #222 ; 0xde\n",
- " 2e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 2e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 2e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 2ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 2ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 2ee: 23ea movs r3, #234 ; 0xea\n",
- " 2f0: 23ec movs r3, #236 ; 0xec\n",
- " 2f2: 23ee movs r3, #238 ; 0xee\n",
- " 2f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 2f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 2f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 2fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 2fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 2fe: 23fa movs r3, #250 ; 0xfa\n",
- " 300: 23fc movs r3, #252 ; 0xfc\n",
- " 302: 23fe movs r3, #254 ; 0xfe\n",
- " 304: 2300 movs r3, #0\n",
- " 306: 2302 movs r3, #2\n",
- " 308: 2304 movs r3, #4\n",
- " 30a: 2306 movs r3, #6\n",
- " 30c: 2308 movs r3, #8\n",
- " 30e: 230a movs r3, #10\n",
- " 310: 230c movs r3, #12\n",
- " 312: 230e movs r3, #14\n",
- " 314: 2310 movs r3, #16\n",
- " 316: 2312 movs r3, #18\n",
- " 318: 2314 movs r3, #20\n",
- " 31a: 2316 movs r3, #22\n",
- " 31c: 2318 movs r3, #24\n",
- " 31e: 231a movs r3, #26\n",
- " 320: 231c movs r3, #28\n",
- " 322: 231e movs r3, #30\n",
- " 324: 2320 movs r3, #32\n",
- " 326: 2322 movs r3, #34 ; 0x22\n",
- " 328: 2324 movs r3, #36 ; 0x24\n",
- " 32a: 2326 movs r3, #38 ; 0x26\n",
- " 32c: 2328 movs r3, #40 ; 0x28\n",
- " 32e: 232a movs r3, #42 ; 0x2a\n",
- " 330: 232c movs r3, #44 ; 0x2c\n",
- " 332: 232e movs r3, #46 ; 0x2e\n",
- " 334: 2330 movs r3, #48 ; 0x30\n",
- " 336: 2332 movs r3, #50 ; 0x32\n",
- " 338: 2334 movs r3, #52 ; 0x34\n",
- " 33a: 2336 movs r3, #54 ; 0x36\n",
- " 33c: 2338 movs r3, #56 ; 0x38\n",
- " 33e: 233a movs r3, #58 ; 0x3a\n",
- " 340: 233c movs r3, #60 ; 0x3c\n",
- " 342: 233e movs r3, #62 ; 0x3e\n",
- " 344: 2340 movs r3, #64 ; 0x40\n",
- " 346: 2342 movs r3, #66 ; 0x42\n",
- " 348: 2344 movs r3, #68 ; 0x44\n",
- " 34a: 2346 movs r3, #70 ; 0x46\n",
- " 34c: 2348 movs r3, #72 ; 0x48\n",
- " 34e: 234a movs r3, #74 ; 0x4a\n",
- " 350: 234c movs r3, #76 ; 0x4c\n",
- " 352: 234e movs r3, #78 ; 0x4e\n",
- " 354: 2350 movs r3, #80 ; 0x50\n",
- " 356: 2352 movs r3, #82 ; 0x52\n",
- " 358: 2354 movs r3, #84 ; 0x54\n",
- " 35a: 2356 movs r3, #86 ; 0x56\n",
- " 35c: 2358 movs r3, #88 ; 0x58\n",
- " 35e: 235a movs r3, #90 ; 0x5a\n",
- " 360: 235c movs r3, #92 ; 0x5c\n",
- " 362: 235e movs r3, #94 ; 0x5e\n",
- " 364: 2360 movs r3, #96 ; 0x60\n",
- " 366: 2362 movs r3, #98 ; 0x62\n",
- " 368: 2364 movs r3, #100 ; 0x64\n",
- " 36a: 2366 movs r3, #102 ; 0x66\n",
- " 36c: 2368 movs r3, #104 ; 0x68\n",
- " 36e: 236a movs r3, #106 ; 0x6a\n",
- " 370: 236c movs r3, #108 ; 0x6c\n",
- " 372: 236e movs r3, #110 ; 0x6e\n",
- " 374: 2370 movs r3, #112 ; 0x70\n",
- " 376: 2372 movs r3, #114 ; 0x72\n",
- " 378: 2374 movs r3, #116 ; 0x74\n",
- " 37a: 2376 movs r3, #118 ; 0x76\n",
- " 37c: 2378 movs r3, #120 ; 0x78\n",
- " 37e: 237a movs r3, #122 ; 0x7a\n",
- " 380: 237c movs r3, #124 ; 0x7c\n",
- " 382: 237e movs r3, #126 ; 0x7e\n",
- " 384: 2380 movs r3, #128 ; 0x80\n",
- " 386: 2382 movs r3, #130 ; 0x82\n",
- " 388: 2384 movs r3, #132 ; 0x84\n",
- " 38a: 2386 movs r3, #134 ; 0x86\n",
- " 38c: 2388 movs r3, #136 ; 0x88\n",
- " 38e: 238a movs r3, #138 ; 0x8a\n",
- " 390: 238c movs r3, #140 ; 0x8c\n",
- " 392: 238e movs r3, #142 ; 0x8e\n",
- " 394: 2390 movs r3, #144 ; 0x90\n",
- " 396: 2392 movs r3, #146 ; 0x92\n",
- " 398: 2394 movs r3, #148 ; 0x94\n",
- " 39a: 2396 movs r3, #150 ; 0x96\n",
- " 39c: 2398 movs r3, #152 ; 0x98\n",
- " 39e: 239a movs r3, #154 ; 0x9a\n",
- " 3a0: 239c movs r3, #156 ; 0x9c\n",
- " 3a2: 239e movs r3, #158 ; 0x9e\n",
- " 3a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 3a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 3a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 3aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 3ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 3ae: 23aa movs r3, #170 ; 0xaa\n",
- " 3b0: 23ac movs r3, #172 ; 0xac\n",
- " 3b2: 23ae movs r3, #174 ; 0xae\n",
- " 3b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 3b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 3b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 3ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 3bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 3be: 23ba movs r3, #186 ; 0xba\n",
- " 3c0: 23bc movs r3, #188 ; 0xbc\n",
- " 3c2: 23be movs r3, #190 ; 0xbe\n",
- " 3c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 3c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 3c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 3ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 3cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 3ce: 23ca movs r3, #202 ; 0xca\n",
- " 3d0: 23cc movs r3, #204 ; 0xcc\n",
- " 3d2: 23ce movs r3, #206 ; 0xce\n",
- " 3d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 3d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 3d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 3da: 23d6 movs r3, #214 ; 0xd6\n",
- " 3dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 3de: 23da movs r3, #218 ; 0xda\n",
- " 3e0: 23dc movs r3, #220 ; 0xdc\n",
- " 3e2: 23de movs r3, #222 ; 0xde\n",
- " 3e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 3e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 3e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 3ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 3ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 3ee: 23ea movs r3, #234 ; 0xea\n",
- " 3f0: 23ec movs r3, #236 ; 0xec\n",
- " 3f2: 23ee movs r3, #238 ; 0xee\n",
- " 3f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 3f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 3f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 3fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 3fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 3fe: 23fa movs r3, #250 ; 0xfa\n",
- " 400: 23fc movs r3, #252 ; 0xfc\n",
- " 402: 23fe movs r3, #254 ; 0xfe\n",
- " 404: 2300 movs r3, #0\n",
- " 406: 2302 movs r3, #2\n",
- " 408: 2304 movs r3, #4\n",
- " 40a: 2306 movs r3, #6\n",
- " 40c: 2308 movs r3, #8\n",
- " 40e: 230a movs r3, #10\n",
- " 410: 230c movs r3, #12\n",
- " 412: 230e movs r3, #14\n",
- " 414: 2310 movs r3, #16\n",
- " 416: 2312 movs r3, #18\n",
- " 418: 2314 movs r3, #20\n",
- " 41a: 2316 movs r3, #22\n",
- " 41c: 2318 movs r3, #24\n",
- " 41e: 231a movs r3, #26\n",
- " 420: 231c movs r3, #28\n",
- " 422: 231e movs r3, #30\n",
- " 424: 2320 movs r3, #32\n",
- " 426: 2322 movs r3, #34 ; 0x22\n",
- " 428: 2324 movs r3, #36 ; 0x24\n",
- " 42a: 2326 movs r3, #38 ; 0x26\n",
- " 42c: 2328 movs r3, #40 ; 0x28\n",
- " 42e: 232a movs r3, #42 ; 0x2a\n",
- " 430: 232c movs r3, #44 ; 0x2c\n",
- " 432: 232e movs r3, #46 ; 0x2e\n",
- " 434: 2330 movs r3, #48 ; 0x30\n",
- " 436: 2332 movs r3, #50 ; 0x32\n",
- " 438: 2334 movs r3, #52 ; 0x34\n",
- " 43a: 2336 movs r3, #54 ; 0x36\n",
- " 43c: 2338 movs r3, #56 ; 0x38\n",
- " 43e: 233a movs r3, #58 ; 0x3a\n",
- " 440: 233c movs r3, #60 ; 0x3c\n",
- " 442: 233e movs r3, #62 ; 0x3e\n",
- " 444: 2340 movs r3, #64 ; 0x40\n",
- " 446: 2342 movs r3, #66 ; 0x42\n",
- " 448: 2344 movs r3, #68 ; 0x44\n",
- " 44a: 2346 movs r3, #70 ; 0x46\n",
- " 44c: 2348 movs r3, #72 ; 0x48\n",
- " 44e: 234a movs r3, #74 ; 0x4a\n",
- " 450: 234c movs r3, #76 ; 0x4c\n",
- " 452: 234e movs r3, #78 ; 0x4e\n",
- " 454: 2350 movs r3, #80 ; 0x50\n",
- " 456: 2352 movs r3, #82 ; 0x52\n",
- " 458: 2354 movs r3, #84 ; 0x54\n",
- " 45a: 2356 movs r3, #86 ; 0x56\n",
- " 45c: 2358 movs r3, #88 ; 0x58\n",
- " 45e: 235a movs r3, #90 ; 0x5a\n",
- " 460: 235c movs r3, #92 ; 0x5c\n",
- " 462: 235e movs r3, #94 ; 0x5e\n",
- " 464: 2360 movs r3, #96 ; 0x60\n",
- " 466: 2362 movs r3, #98 ; 0x62\n",
- " 468: 2364 movs r3, #100 ; 0x64\n",
- " 46a: 2366 movs r3, #102 ; 0x66\n",
- " 46c: 2368 movs r3, #104 ; 0x68\n",
- " 46e: 236a movs r3, #106 ; 0x6a\n",
- " 470: 236c movs r3, #108 ; 0x6c\n",
- " 472: 236e movs r3, #110 ; 0x6e\n",
- " 474: 2370 movs r3, #112 ; 0x70\n",
- " 476: 2372 movs r3, #114 ; 0x72\n",
- " 478: 2374 movs r3, #116 ; 0x74\n",
- " 47a: 2376 movs r3, #118 ; 0x76\n",
- " 47c: 2378 movs r3, #120 ; 0x78\n",
- " 47e: 237a movs r3, #122 ; 0x7a\n",
- " 480: 237c movs r3, #124 ; 0x7c\n",
- " 482: 237e movs r3, #126 ; 0x7e\n",
- " 484: 2380 movs r3, #128 ; 0x80\n",
- " 486: 2382 movs r3, #130 ; 0x82\n",
- " 488: 2384 movs r3, #132 ; 0x84\n",
- " 48a: 2386 movs r3, #134 ; 0x86\n",
- " 48c: 2388 movs r3, #136 ; 0x88\n",
- " 48e: 238a movs r3, #138 ; 0x8a\n",
- " 490: 238c movs r3, #140 ; 0x8c\n",
- " 492: 238e movs r3, #142 ; 0x8e\n",
- " 494: 2390 movs r3, #144 ; 0x90\n",
- " 496: 2392 movs r3, #146 ; 0x92\n",
- " 498: 2394 movs r3, #148 ; 0x94\n",
- " 49a: 2396 movs r3, #150 ; 0x96\n",
- " 49c: 2398 movs r3, #152 ; 0x98\n",
- " 49e: 239a movs r3, #154 ; 0x9a\n",
- " 4a0: 239c movs r3, #156 ; 0x9c\n",
- " 4a2: 239e movs r3, #158 ; 0x9e\n",
- " 4a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 4a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 4a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 4aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 4ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 4ae: 23aa movs r3, #170 ; 0xaa\n",
- " 4b0: 23ac movs r3, #172 ; 0xac\n",
- " 4b2: 23ae movs r3, #174 ; 0xae\n",
- " 4b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 4b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 4b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 4ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 4bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 4be: 23ba movs r3, #186 ; 0xba\n",
- " 4c0: 23bc movs r3, #188 ; 0xbc\n",
- " 4c2: 23be movs r3, #190 ; 0xbe\n",
- " 4c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 4c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 4c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 4ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 4cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 4ce: 23ca movs r3, #202 ; 0xca\n",
- " 4d0: 23cc movs r3, #204 ; 0xcc\n",
- " 4d2: 23ce movs r3, #206 ; 0xce\n",
- " 4d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 4d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 4d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 4da: 23d6 movs r3, #214 ; 0xd6\n",
- " 4dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 4de: 23da movs r3, #218 ; 0xda\n",
- " 4e0: 23dc movs r3, #220 ; 0xdc\n",
- " 4e2: 23de movs r3, #222 ; 0xde\n",
- " 4e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 4e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 4e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 4ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 4ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 4ee: 23ea movs r3, #234 ; 0xea\n",
- " 4f0: 23ec movs r3, #236 ; 0xec\n",
- " 4f2: 23ee movs r3, #238 ; 0xee\n",
- " 4f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 4f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 4f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 4fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 4fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 4fe: 23fa movs r3, #250 ; 0xfa\n",
- " 500: 23fc movs r3, #252 ; 0xfc\n",
- " 502: 23fe movs r3, #254 ; 0xfe\n",
- " 504: 2300 movs r3, #0\n",
- " 506: 2302 movs r3, #2\n",
- " 508: 2304 movs r3, #4\n",
- " 50a: 2306 movs r3, #6\n",
- " 50c: 2308 movs r3, #8\n",
- " 50e: 230a movs r3, #10\n",
- " 510: 230c movs r3, #12\n",
- " 512: 230e movs r3, #14\n",
- " 514: 2310 movs r3, #16\n",
- " 516: 2312 movs r3, #18\n",
- " 518: 2314 movs r3, #20\n",
- " 51a: 2316 movs r3, #22\n",
- " 51c: 2318 movs r3, #24\n",
- " 51e: 231a movs r3, #26\n",
- " 520: 231c movs r3, #28\n",
- " 522: 231e movs r3, #30\n",
- " 524: 2320 movs r3, #32\n",
- " 526: 2322 movs r3, #34 ; 0x22\n",
- " 528: 2324 movs r3, #36 ; 0x24\n",
- " 52a: 2326 movs r3, #38 ; 0x26\n",
- " 52c: 2328 movs r3, #40 ; 0x28\n",
- " 52e: 232a movs r3, #42 ; 0x2a\n",
- " 530: 232c movs r3, #44 ; 0x2c\n",
- " 532: 232e movs r3, #46 ; 0x2e\n",
- " 534: 2330 movs r3, #48 ; 0x30\n",
- " 536: 2332 movs r3, #50 ; 0x32\n",
- " 538: 2334 movs r3, #52 ; 0x34\n",
- " 53a: 2336 movs r3, #54 ; 0x36\n",
- " 53c: 2338 movs r3, #56 ; 0x38\n",
- " 53e: 233a movs r3, #58 ; 0x3a\n",
- " 540: 233c movs r3, #60 ; 0x3c\n",
- " 542: 233e movs r3, #62 ; 0x3e\n",
- " 544: 2340 movs r3, #64 ; 0x40\n",
- " 546: 2342 movs r3, #66 ; 0x42\n",
- " 548: 2344 movs r3, #68 ; 0x44\n",
- " 54a: 2346 movs r3, #70 ; 0x46\n",
- " 54c: 2348 movs r3, #72 ; 0x48\n",
- " 54e: 234a movs r3, #74 ; 0x4a\n",
- " 550: 234c movs r3, #76 ; 0x4c\n",
- " 552: 234e movs r3, #78 ; 0x4e\n",
- " 554: 2350 movs r3, #80 ; 0x50\n",
- " 556: 2352 movs r3, #82 ; 0x52\n",
- " 558: 2354 movs r3, #84 ; 0x54\n",
- " 55a: 2356 movs r3, #86 ; 0x56\n",
- " 55c: 2358 movs r3, #88 ; 0x58\n",
- " 55e: 235a movs r3, #90 ; 0x5a\n",
- " 560: 235c movs r3, #92 ; 0x5c\n",
- " 562: 235e movs r3, #94 ; 0x5e\n",
- " 564: 2360 movs r3, #96 ; 0x60\n",
- " 566: 2362 movs r3, #98 ; 0x62\n",
- " 568: 2364 movs r3, #100 ; 0x64\n",
- " 56a: 2366 movs r3, #102 ; 0x66\n",
- " 56c: 2368 movs r3, #104 ; 0x68\n",
- " 56e: 236a movs r3, #106 ; 0x6a\n",
- " 570: 236c movs r3, #108 ; 0x6c\n",
- " 572: 236e movs r3, #110 ; 0x6e\n",
- " 574: 2370 movs r3, #112 ; 0x70\n",
- " 576: 2372 movs r3, #114 ; 0x72\n",
- " 578: 2374 movs r3, #116 ; 0x74\n",
- " 57a: 2376 movs r3, #118 ; 0x76\n",
- " 57c: 2378 movs r3, #120 ; 0x78\n",
- " 57e: 237a movs r3, #122 ; 0x7a\n",
- " 580: 237c movs r3, #124 ; 0x7c\n",
- " 582: 237e movs r3, #126 ; 0x7e\n",
- " 584: 2380 movs r3, #128 ; 0x80\n",
- " 586: 2382 movs r3, #130 ; 0x82\n",
- " 588: 2384 movs r3, #132 ; 0x84\n",
- " 58a: 2386 movs r3, #134 ; 0x86\n",
- " 58c: 2388 movs r3, #136 ; 0x88\n",
- " 58e: 238a movs r3, #138 ; 0x8a\n",
- " 590: 238c movs r3, #140 ; 0x8c\n",
- " 592: 238e movs r3, #142 ; 0x8e\n",
- " 594: 2390 movs r3, #144 ; 0x90\n",
- " 596: 2392 movs r3, #146 ; 0x92\n",
- " 598: 2394 movs r3, #148 ; 0x94\n",
- " 59a: 2396 movs r3, #150 ; 0x96\n",
- " 59c: 2398 movs r3, #152 ; 0x98\n",
- " 59e: 239a movs r3, #154 ; 0x9a\n",
- " 5a0: 239c movs r3, #156 ; 0x9c\n",
- " 5a2: 239e movs r3, #158 ; 0x9e\n",
- " 5a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 5a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 5a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 5aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 5ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 5ae: 23aa movs r3, #170 ; 0xaa\n",
- " 5b0: 23ac movs r3, #172 ; 0xac\n",
- " 5b2: 23ae movs r3, #174 ; 0xae\n",
- " 5b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 5b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 5b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 5ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 5bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 5be: 23ba movs r3, #186 ; 0xba\n",
- " 5c0: 23bc movs r3, #188 ; 0xbc\n",
- " 5c2: 23be movs r3, #190 ; 0xbe\n",
- " 5c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 5c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 5c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 5ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 5cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 5ce: 23ca movs r3, #202 ; 0xca\n",
- " 5d0: 23cc movs r3, #204 ; 0xcc\n",
- " 5d2: 23ce movs r3, #206 ; 0xce\n",
- " 5d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 5d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 5d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 5da: 23d6 movs r3, #214 ; 0xd6\n",
- " 5dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 5de: 23da movs r3, #218 ; 0xda\n",
- " 5e0: 23dc movs r3, #220 ; 0xdc\n",
- " 5e2: 23de movs r3, #222 ; 0xde\n",
- " 5e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 5e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 5e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 5ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 5ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 5ee: 23ea movs r3, #234 ; 0xea\n",
- " 5f0: 23ec movs r3, #236 ; 0xec\n",
- " 5f2: 23ee movs r3, #238 ; 0xee\n",
- " 5f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 5f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 5f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 5fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 5fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 5fe: 23fa movs r3, #250 ; 0xfa\n",
- " 600: 23fc movs r3, #252 ; 0xfc\n",
- " 602: 23fe movs r3, #254 ; 0xfe\n",
- " 604: 2300 movs r3, #0\n",
- " 606: 2302 movs r3, #2\n",
- " 608: 2304 movs r3, #4\n",
- " 60a: 2306 movs r3, #6\n",
- " 60c: 2308 movs r3, #8\n",
- " 60e: 230a movs r3, #10\n",
- " 610: 230c movs r3, #12\n",
- " 612: 230e movs r3, #14\n",
- " 614: 2310 movs r3, #16\n",
- " 616: 2312 movs r3, #18\n",
- " 618: 2314 movs r3, #20\n",
- " 61a: 2316 movs r3, #22\n",
- " 61c: 2318 movs r3, #24\n",
- " 61e: 231a movs r3, #26\n",
- " 620: 231c movs r3, #28\n",
- " 622: 231e movs r3, #30\n",
- " 624: 2320 movs r3, #32\n",
- " 626: 2322 movs r3, #34 ; 0x22\n",
- " 628: 2324 movs r3, #36 ; 0x24\n",
- " 62a: 2326 movs r3, #38 ; 0x26\n",
- " 62c: 2328 movs r3, #40 ; 0x28\n",
- " 62e: 232a movs r3, #42 ; 0x2a\n",
- " 630: 232c movs r3, #44 ; 0x2c\n",
- " 632: 232e movs r3, #46 ; 0x2e\n",
- " 634: 2330 movs r3, #48 ; 0x30\n",
- " 636: 2332 movs r3, #50 ; 0x32\n",
- " 638: 2334 movs r3, #52 ; 0x34\n",
- " 63a: 2336 movs r3, #54 ; 0x36\n",
- " 63c: 2338 movs r3, #56 ; 0x38\n",
- " 63e: 233a movs r3, #58 ; 0x3a\n",
- " 640: 233c movs r3, #60 ; 0x3c\n",
- " 642: 233e movs r3, #62 ; 0x3e\n",
- " 644: 2340 movs r3, #64 ; 0x40\n",
- " 646: 2342 movs r3, #66 ; 0x42\n",
- " 648: 2344 movs r3, #68 ; 0x44\n",
- " 64a: 2346 movs r3, #70 ; 0x46\n",
- " 64c: 2348 movs r3, #72 ; 0x48\n",
- " 64e: 234a movs r3, #74 ; 0x4a\n",
- " 650: 234c movs r3, #76 ; 0x4c\n",
- " 652: 234e movs r3, #78 ; 0x4e\n",
- " 654: 2350 movs r3, #80 ; 0x50\n",
- " 656: 2352 movs r3, #82 ; 0x52\n",
- " 658: 2354 movs r3, #84 ; 0x54\n",
- " 65a: 2356 movs r3, #86 ; 0x56\n",
- " 65c: 2358 movs r3, #88 ; 0x58\n",
- " 65e: 235a movs r3, #90 ; 0x5a\n",
- " 660: 235c movs r3, #92 ; 0x5c\n",
- " 662: 235e movs r3, #94 ; 0x5e\n",
- " 664: 2360 movs r3, #96 ; 0x60\n",
- " 666: 2362 movs r3, #98 ; 0x62\n",
- " 668: 2364 movs r3, #100 ; 0x64\n",
- " 66a: 2366 movs r3, #102 ; 0x66\n",
- " 66c: 2368 movs r3, #104 ; 0x68\n",
- " 66e: 236a movs r3, #106 ; 0x6a\n",
- " 670: 236c movs r3, #108 ; 0x6c\n",
- " 672: 236e movs r3, #110 ; 0x6e\n",
- " 674: 2370 movs r3, #112 ; 0x70\n",
- " 676: 2372 movs r3, #114 ; 0x72\n",
- " 678: 2374 movs r3, #116 ; 0x74\n",
- " 67a: 2376 movs r3, #118 ; 0x76\n",
- " 67c: 2378 movs r3, #120 ; 0x78\n",
- " 67e: 237a movs r3, #122 ; 0x7a\n",
- " 680: 237c movs r3, #124 ; 0x7c\n",
- " 682: 237e movs r3, #126 ; 0x7e\n",
- " 684: 2380 movs r3, #128 ; 0x80\n",
- " 686: 2382 movs r3, #130 ; 0x82\n",
- " 688: 2384 movs r3, #132 ; 0x84\n",
- " 68a: 2386 movs r3, #134 ; 0x86\n",
- " 68c: 2388 movs r3, #136 ; 0x88\n",
- " 68e: 238a movs r3, #138 ; 0x8a\n",
- " 690: 238c movs r3, #140 ; 0x8c\n",
- " 692: 238e movs r3, #142 ; 0x8e\n",
- " 694: 2390 movs r3, #144 ; 0x90\n",
- " 696: 2392 movs r3, #146 ; 0x92\n",
- " 698: 2394 movs r3, #148 ; 0x94\n",
- " 69a: 2396 movs r3, #150 ; 0x96\n",
- " 69c: 2398 movs r3, #152 ; 0x98\n",
- " 69e: 239a movs r3, #154 ; 0x9a\n",
- " 6a0: 239c movs r3, #156 ; 0x9c\n",
- " 6a2: 239e movs r3, #158 ; 0x9e\n",
- " 6a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 6a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 6a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 6aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 6ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 6ae: 23aa movs r3, #170 ; 0xaa\n",
- " 6b0: 23ac movs r3, #172 ; 0xac\n",
- " 6b2: 23ae movs r3, #174 ; 0xae\n",
- " 6b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 6b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 6b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 6ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 6bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 6be: 23ba movs r3, #186 ; 0xba\n",
- " 6c0: 23bc movs r3, #188 ; 0xbc\n",
- " 6c2: 23be movs r3, #190 ; 0xbe\n",
- " 6c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 6c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 6c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 6ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 6cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 6ce: 23ca movs r3, #202 ; 0xca\n",
- " 6d0: 23cc movs r3, #204 ; 0xcc\n",
- " 6d2: 23ce movs r3, #206 ; 0xce\n",
- " 6d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 6d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 6d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 6da: 23d6 movs r3, #214 ; 0xd6\n",
- " 6dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 6de: 23da movs r3, #218 ; 0xda\n",
- " 6e0: 23dc movs r3, #220 ; 0xdc\n",
- " 6e2: 23de movs r3, #222 ; 0xde\n",
- " 6e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 6e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 6e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 6ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 6ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 6ee: 23ea movs r3, #234 ; 0xea\n",
- " 6f0: 23ec movs r3, #236 ; 0xec\n",
- " 6f2: 23ee movs r3, #238 ; 0xee\n",
- " 6f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 6f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 6f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 6fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 6fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 6fe: 23fa movs r3, #250 ; 0xfa\n",
- " 700: 23fc movs r3, #252 ; 0xfc\n",
- " 702: 23fe movs r3, #254 ; 0xfe\n",
- " 704: 2300 movs r3, #0\n",
- " 706: 2302 movs r3, #2\n",
- " 708: 2304 movs r3, #4\n",
- " 70a: 2306 movs r3, #6\n",
- " 70c: 2308 movs r3, #8\n",
- " 70e: 230a movs r3, #10\n",
- " 710: 230c movs r3, #12\n",
- " 712: 230e movs r3, #14\n",
- " 714: 2310 movs r3, #16\n",
- " 716: 2312 movs r3, #18\n",
- " 718: 2314 movs r3, #20\n",
- " 71a: 2316 movs r3, #22\n",
- " 71c: 2318 movs r3, #24\n",
- " 71e: 231a movs r3, #26\n",
- " 720: 231c movs r3, #28\n",
- " 722: 231e movs r3, #30\n",
- " 724: 2320 movs r3, #32\n",
- " 726: 2322 movs r3, #34 ; 0x22\n",
- " 728: 2324 movs r3, #36 ; 0x24\n",
- " 72a: 2326 movs r3, #38 ; 0x26\n",
- " 72c: 2328 movs r3, #40 ; 0x28\n",
- " 72e: 232a movs r3, #42 ; 0x2a\n",
- " 730: 232c movs r3, #44 ; 0x2c\n",
- " 732: 232e movs r3, #46 ; 0x2e\n",
- " 734: 2330 movs r3, #48 ; 0x30\n",
- " 736: 2332 movs r3, #50 ; 0x32\n",
- " 738: 2334 movs r3, #52 ; 0x34\n",
- " 73a: 2336 movs r3, #54 ; 0x36\n",
- " 73c: 2338 movs r3, #56 ; 0x38\n",
- " 73e: 233a movs r3, #58 ; 0x3a\n",
- " 740: 233c movs r3, #60 ; 0x3c\n",
- " 742: 233e movs r3, #62 ; 0x3e\n",
- " 744: 2340 movs r3, #64 ; 0x40\n",
- " 746: 2342 movs r3, #66 ; 0x42\n",
- " 748: 2344 movs r3, #68 ; 0x44\n",
- " 74a: 2346 movs r3, #70 ; 0x46\n",
- " 74c: 2348 movs r3, #72 ; 0x48\n",
- " 74e: 234a movs r3, #74 ; 0x4a\n",
- " 750: 234c movs r3, #76 ; 0x4c\n",
- " 752: 234e movs r3, #78 ; 0x4e\n",
- " 754: 2350 movs r3, #80 ; 0x50\n",
- " 756: 2352 movs r3, #82 ; 0x52\n",
- " 758: 2354 movs r3, #84 ; 0x54\n",
- " 75a: 2356 movs r3, #86 ; 0x56\n",
- " 75c: 2358 movs r3, #88 ; 0x58\n",
- " 75e: 235a movs r3, #90 ; 0x5a\n",
- " 760: 235c movs r3, #92 ; 0x5c\n",
- " 762: 235e movs r3, #94 ; 0x5e\n",
- " 764: 2360 movs r3, #96 ; 0x60\n",
- " 766: 2362 movs r3, #98 ; 0x62\n",
- " 768: 2364 movs r3, #100 ; 0x64\n",
- " 76a: 2366 movs r3, #102 ; 0x66\n",
- " 76c: 2368 movs r3, #104 ; 0x68\n",
- " 76e: 236a movs r3, #106 ; 0x6a\n",
- " 770: 236c movs r3, #108 ; 0x6c\n",
- " 772: 236e movs r3, #110 ; 0x6e\n",
- " 774: 2370 movs r3, #112 ; 0x70\n",
- " 776: 2372 movs r3, #114 ; 0x72\n",
- " 778: 2374 movs r3, #116 ; 0x74\n",
- " 77a: 2376 movs r3, #118 ; 0x76\n",
- " 77c: 2378 movs r3, #120 ; 0x78\n",
- " 77e: 237a movs r3, #122 ; 0x7a\n",
- " 780: 237c movs r3, #124 ; 0x7c\n",
- " 782: 237e movs r3, #126 ; 0x7e\n",
- " 784: 2380 movs r3, #128 ; 0x80\n",
- " 786: 2382 movs r3, #130 ; 0x82\n",
- " 788: 2384 movs r3, #132 ; 0x84\n",
- " 78a: 2386 movs r3, #134 ; 0x86\n",
- " 78c: 2388 movs r3, #136 ; 0x88\n",
- " 78e: 238a movs r3, #138 ; 0x8a\n",
- " 790: 238c movs r3, #140 ; 0x8c\n",
- " 792: 238e movs r3, #142 ; 0x8e\n",
- " 794: 2390 movs r3, #144 ; 0x90\n",
- " 796: 2392 movs r3, #146 ; 0x92\n",
- " 798: 2394 movs r3, #148 ; 0x94\n",
- " 79a: 2396 movs r3, #150 ; 0x96\n",
- " 79c: 2398 movs r3, #152 ; 0x98\n",
- " 79e: 239a movs r3, #154 ; 0x9a\n",
- " 7a0: 239c movs r3, #156 ; 0x9c\n",
- " 7a2: 239e movs r3, #158 ; 0x9e\n",
- " 7a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 7a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 7a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 7aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 7ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 7ae: 23aa movs r3, #170 ; 0xaa\n",
- " 7b0: 23ac movs r3, #172 ; 0xac\n",
- " 7b2: 23ae movs r3, #174 ; 0xae\n",
- " 7b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 7b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 7b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 7ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 7bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 7be: 23ba movs r3, #186 ; 0xba\n",
- " 7c0: 23bc movs r3, #188 ; 0xbc\n",
- " 7c2: 23be movs r3, #190 ; 0xbe\n",
- " 7c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 7c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 7c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 7ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 7cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 7ce: 23ca movs r3, #202 ; 0xca\n",
- " 7d0: 23cc movs r3, #204 ; 0xcc\n",
- " 7d2: 23ce movs r3, #206 ; 0xce\n",
- " 7d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 7d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 7d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 7da: 23d6 movs r3, #214 ; 0xd6\n",
- " 7dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 7de: 23da movs r3, #218 ; 0xda\n",
- " 7e0: 23dc movs r3, #220 ; 0xdc\n",
- " 7e2: 23de movs r3, #222 ; 0xde\n",
- " 7e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 7e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 7e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 7ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 7ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 7ee: 23ea movs r3, #234 ; 0xea\n",
- " 7f0: 23ec movs r3, #236 ; 0xec\n",
- " 7f2: 23ee movs r3, #238 ; 0xee\n",
- " 7f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 7f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 7f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 7fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 7fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 7fe: 23fa movs r3, #250 ; 0xfa\n",
- " 800: 23fc movs r3, #252 ; 0xfc\n",
- " 802: 23fe movs r3, #254 ; 0xfe\n",
- " 804: 2300 movs r3, #0\n",
- " 806: 4611 mov r1, r2\n",
- nullptr
-};
-const char* const CompareAndBranchMaxResults[] = {
- " 0: b3fc cbz r4, 82 <CompareAndBranchMax+0x82>\n",
- " 2: 2300 movs r3, #0\n",
- " 4: 2302 movs r3, #2\n",
- " 6: 2304 movs r3, #4\n",
- " 8: 2306 movs r3, #6\n",
- " a: 2308 movs r3, #8\n",
- " c: 230a movs r3, #10\n",
- " e: 230c movs r3, #12\n",
- " 10: 230e movs r3, #14\n",
- " 12: 2310 movs r3, #16\n",
- " 14: 2312 movs r3, #18\n",
- " 16: 2314 movs r3, #20\n",
- " 18: 2316 movs r3, #22\n",
- " 1a: 2318 movs r3, #24\n",
- " 1c: 231a movs r3, #26\n",
- " 1e: 231c movs r3, #28\n",
- " 20: 231e movs r3, #30\n",
- " 22: 2320 movs r3, #32\n",
- " 24: 2322 movs r3, #34 ; 0x22\n",
- " 26: 2324 movs r3, #36 ; 0x24\n",
- " 28: 2326 movs r3, #38 ; 0x26\n",
- " 2a: 2328 movs r3, #40 ; 0x28\n",
- " 2c: 232a movs r3, #42 ; 0x2a\n",
- " 2e: 232c movs r3, #44 ; 0x2c\n",
- " 30: 232e movs r3, #46 ; 0x2e\n",
- " 32: 2330 movs r3, #48 ; 0x30\n",
- " 34: 2332 movs r3, #50 ; 0x32\n",
- " 36: 2334 movs r3, #52 ; 0x34\n",
- " 38: 2336 movs r3, #54 ; 0x36\n",
- " 3a: 2338 movs r3, #56 ; 0x38\n",
- " 3c: 233a movs r3, #58 ; 0x3a\n",
- " 3e: 233c movs r3, #60 ; 0x3c\n",
- " 40: 233e movs r3, #62 ; 0x3e\n",
- " 42: 2340 movs r3, #64 ; 0x40\n",
- " 44: 2342 movs r3, #66 ; 0x42\n",
- " 46: 2344 movs r3, #68 ; 0x44\n",
- " 48: 2346 movs r3, #70 ; 0x46\n",
- " 4a: 2348 movs r3, #72 ; 0x48\n",
- " 4c: 234a movs r3, #74 ; 0x4a\n",
- " 4e: 234c movs r3, #76 ; 0x4c\n",
- " 50: 234e movs r3, #78 ; 0x4e\n",
- " 52: 2350 movs r3, #80 ; 0x50\n",
- " 54: 2352 movs r3, #82 ; 0x52\n",
- " 56: 2354 movs r3, #84 ; 0x54\n",
- " 58: 2356 movs r3, #86 ; 0x56\n",
- " 5a: 2358 movs r3, #88 ; 0x58\n",
- " 5c: 235a movs r3, #90 ; 0x5a\n",
- " 5e: 235c movs r3, #92 ; 0x5c\n",
- " 60: 235e movs r3, #94 ; 0x5e\n",
- " 62: 2360 movs r3, #96 ; 0x60\n",
- " 64: 2362 movs r3, #98 ; 0x62\n",
- " 66: 2364 movs r3, #100 ; 0x64\n",
- " 68: 2366 movs r3, #102 ; 0x66\n",
- " 6a: 2368 movs r3, #104 ; 0x68\n",
- " 6c: 236a movs r3, #106 ; 0x6a\n",
- " 6e: 236c movs r3, #108 ; 0x6c\n",
- " 70: 236e movs r3, #110 ; 0x6e\n",
- " 72: 2370 movs r3, #112 ; 0x70\n",
- " 74: 2372 movs r3, #114 ; 0x72\n",
- " 76: 2374 movs r3, #116 ; 0x74\n",
- " 78: 2376 movs r3, #118 ; 0x76\n",
- " 7a: 2378 movs r3, #120 ; 0x78\n",
- " 7c: 237a movs r3, #122 ; 0x7a\n",
- " 7e: 237c movs r3, #124 ; 0x7c\n",
- " 80: 237e movs r3, #126 ; 0x7e\n",
- " 82: 4611 mov r1, r2\n",
- nullptr
-};
-const char* const CompareAndBranchRelocation16Results[] = {
- " 0: 2c00 cmp r4, #0\n",
- " 2: d040 beq.n 86 <CompareAndBranchRelocation16+0x86>\n",
- " 4: 2300 movs r3, #0\n",
- " 6: 2302 movs r3, #2\n",
- " 8: 2304 movs r3, #4\n",
- " a: 2306 movs r3, #6\n",
- " c: 2308 movs r3, #8\n",
- " e: 230a movs r3, #10\n",
- " 10: 230c movs r3, #12\n",
- " 12: 230e movs r3, #14\n",
- " 14: 2310 movs r3, #16\n",
- " 16: 2312 movs r3, #18\n",
- " 18: 2314 movs r3, #20\n",
- " 1a: 2316 movs r3, #22\n",
- " 1c: 2318 movs r3, #24\n",
- " 1e: 231a movs r3, #26\n",
- " 20: 231c movs r3, #28\n",
- " 22: 231e movs r3, #30\n",
- " 24: 2320 movs r3, #32\n",
- " 26: 2322 movs r3, #34 ; 0x22\n",
- " 28: 2324 movs r3, #36 ; 0x24\n",
- " 2a: 2326 movs r3, #38 ; 0x26\n",
- " 2c: 2328 movs r3, #40 ; 0x28\n",
- " 2e: 232a movs r3, #42 ; 0x2a\n",
- " 30: 232c movs r3, #44 ; 0x2c\n",
- " 32: 232e movs r3, #46 ; 0x2e\n",
- " 34: 2330 movs r3, #48 ; 0x30\n",
- " 36: 2332 movs r3, #50 ; 0x32\n",
- " 38: 2334 movs r3, #52 ; 0x34\n",
- " 3a: 2336 movs r3, #54 ; 0x36\n",
- " 3c: 2338 movs r3, #56 ; 0x38\n",
- " 3e: 233a movs r3, #58 ; 0x3a\n",
- " 40: 233c movs r3, #60 ; 0x3c\n",
- " 42: 233e movs r3, #62 ; 0x3e\n",
- " 44: 2340 movs r3, #64 ; 0x40\n",
- " 46: 2342 movs r3, #66 ; 0x42\n",
- " 48: 2344 movs r3, #68 ; 0x44\n",
- " 4a: 2346 movs r3, #70 ; 0x46\n",
- " 4c: 2348 movs r3, #72 ; 0x48\n",
- " 4e: 234a movs r3, #74 ; 0x4a\n",
- " 50: 234c movs r3, #76 ; 0x4c\n",
- " 52: 234e movs r3, #78 ; 0x4e\n",
- " 54: 2350 movs r3, #80 ; 0x50\n",
- " 56: 2352 movs r3, #82 ; 0x52\n",
- " 58: 2354 movs r3, #84 ; 0x54\n",
- " 5a: 2356 movs r3, #86 ; 0x56\n",
- " 5c: 2358 movs r3, #88 ; 0x58\n",
- " 5e: 235a movs r3, #90 ; 0x5a\n",
- " 60: 235c movs r3, #92 ; 0x5c\n",
- " 62: 235e movs r3, #94 ; 0x5e\n",
- " 64: 2360 movs r3, #96 ; 0x60\n",
- " 66: 2362 movs r3, #98 ; 0x62\n",
- " 68: 2364 movs r3, #100 ; 0x64\n",
- " 6a: 2366 movs r3, #102 ; 0x66\n",
- " 6c: 2368 movs r3, #104 ; 0x68\n",
- " 6e: 236a movs r3, #106 ; 0x6a\n",
- " 70: 236c movs r3, #108 ; 0x6c\n",
- " 72: 236e movs r3, #110 ; 0x6e\n",
- " 74: 2370 movs r3, #112 ; 0x70\n",
- " 76: 2372 movs r3, #114 ; 0x72\n",
- " 78: 2374 movs r3, #116 ; 0x74\n",
- " 7a: 2376 movs r3, #118 ; 0x76\n",
- " 7c: 2378 movs r3, #120 ; 0x78\n",
- " 7e: 237a movs r3, #122 ; 0x7a\n",
- " 80: 237c movs r3, #124 ; 0x7c\n",
- " 82: 237e movs r3, #126 ; 0x7e\n",
- " 84: 2380 movs r3, #128 ; 0x80\n",
- " 86: 4611 mov r1, r2\n",
- nullptr
-};
-const char* const CompareAndBranchRelocation32Results[] = {
- " 0: 2c00 cmp r4, #0\n",
- " 2: f000 8401 beq.w 808 <CompareAndBranchRelocation32+0x808>\n",
- " 6: 2300 movs r3, #0\n",
- " 8: 2302 movs r3, #2\n",
- " a: 2304 movs r3, #4\n",
- " c: 2306 movs r3, #6\n",
- " e: 2308 movs r3, #8\n",
- " 10: 230a movs r3, #10\n",
- " 12: 230c movs r3, #12\n",
- " 14: 230e movs r3, #14\n",
- " 16: 2310 movs r3, #16\n",
- " 18: 2312 movs r3, #18\n",
- " 1a: 2314 movs r3, #20\n",
- " 1c: 2316 movs r3, #22\n",
- " 1e: 2318 movs r3, #24\n",
- " 20: 231a movs r3, #26\n",
- " 22: 231c movs r3, #28\n",
- " 24: 231e movs r3, #30\n",
- " 26: 2320 movs r3, #32\n",
- " 28: 2322 movs r3, #34 ; 0x22\n",
- " 2a: 2324 movs r3, #36 ; 0x24\n",
- " 2c: 2326 movs r3, #38 ; 0x26\n",
- " 2e: 2328 movs r3, #40 ; 0x28\n",
- " 30: 232a movs r3, #42 ; 0x2a\n",
- " 32: 232c movs r3, #44 ; 0x2c\n",
- " 34: 232e movs r3, #46 ; 0x2e\n",
- " 36: 2330 movs r3, #48 ; 0x30\n",
- " 38: 2332 movs r3, #50 ; 0x32\n",
- " 3a: 2334 movs r3, #52 ; 0x34\n",
- " 3c: 2336 movs r3, #54 ; 0x36\n",
- " 3e: 2338 movs r3, #56 ; 0x38\n",
- " 40: 233a movs r3, #58 ; 0x3a\n",
- " 42: 233c movs r3, #60 ; 0x3c\n",
- " 44: 233e movs r3, #62 ; 0x3e\n",
- " 46: 2340 movs r3, #64 ; 0x40\n",
- " 48: 2342 movs r3, #66 ; 0x42\n",
- " 4a: 2344 movs r3, #68 ; 0x44\n",
- " 4c: 2346 movs r3, #70 ; 0x46\n",
- " 4e: 2348 movs r3, #72 ; 0x48\n",
- " 50: 234a movs r3, #74 ; 0x4a\n",
- " 52: 234c movs r3, #76 ; 0x4c\n",
- " 54: 234e movs r3, #78 ; 0x4e\n",
- " 56: 2350 movs r3, #80 ; 0x50\n",
- " 58: 2352 movs r3, #82 ; 0x52\n",
- " 5a: 2354 movs r3, #84 ; 0x54\n",
- " 5c: 2356 movs r3, #86 ; 0x56\n",
- " 5e: 2358 movs r3, #88 ; 0x58\n",
- " 60: 235a movs r3, #90 ; 0x5a\n",
- " 62: 235c movs r3, #92 ; 0x5c\n",
- " 64: 235e movs r3, #94 ; 0x5e\n",
- " 66: 2360 movs r3, #96 ; 0x60\n",
- " 68: 2362 movs r3, #98 ; 0x62\n",
- " 6a: 2364 movs r3, #100 ; 0x64\n",
- " 6c: 2366 movs r3, #102 ; 0x66\n",
- " 6e: 2368 movs r3, #104 ; 0x68\n",
- " 70: 236a movs r3, #106 ; 0x6a\n",
- " 72: 236c movs r3, #108 ; 0x6c\n",
- " 74: 236e movs r3, #110 ; 0x6e\n",
- " 76: 2370 movs r3, #112 ; 0x70\n",
- " 78: 2372 movs r3, #114 ; 0x72\n",
- " 7a: 2374 movs r3, #116 ; 0x74\n",
- " 7c: 2376 movs r3, #118 ; 0x76\n",
- " 7e: 2378 movs r3, #120 ; 0x78\n",
- " 80: 237a movs r3, #122 ; 0x7a\n",
- " 82: 237c movs r3, #124 ; 0x7c\n",
- " 84: 237e movs r3, #126 ; 0x7e\n",
- " 86: 2380 movs r3, #128 ; 0x80\n",
- " 88: 2382 movs r3, #130 ; 0x82\n",
- " 8a: 2384 movs r3, #132 ; 0x84\n",
- " 8c: 2386 movs r3, #134 ; 0x86\n",
- " 8e: 2388 movs r3, #136 ; 0x88\n",
- " 90: 238a movs r3, #138 ; 0x8a\n",
- " 92: 238c movs r3, #140 ; 0x8c\n",
- " 94: 238e movs r3, #142 ; 0x8e\n",
- " 96: 2390 movs r3, #144 ; 0x90\n",
- " 98: 2392 movs r3, #146 ; 0x92\n",
- " 9a: 2394 movs r3, #148 ; 0x94\n",
- " 9c: 2396 movs r3, #150 ; 0x96\n",
- " 9e: 2398 movs r3, #152 ; 0x98\n",
- " a0: 239a movs r3, #154 ; 0x9a\n",
- " a2: 239c movs r3, #156 ; 0x9c\n",
- " a4: 239e movs r3, #158 ; 0x9e\n",
- " a6: 23a0 movs r3, #160 ; 0xa0\n",
- " a8: 23a2 movs r3, #162 ; 0xa2\n",
- " aa: 23a4 movs r3, #164 ; 0xa4\n",
- " ac: 23a6 movs r3, #166 ; 0xa6\n",
- " ae: 23a8 movs r3, #168 ; 0xa8\n",
- " b0: 23aa movs r3, #170 ; 0xaa\n",
- " b2: 23ac movs r3, #172 ; 0xac\n",
- " b4: 23ae movs r3, #174 ; 0xae\n",
- " b6: 23b0 movs r3, #176 ; 0xb0\n",
- " b8: 23b2 movs r3, #178 ; 0xb2\n",
- " ba: 23b4 movs r3, #180 ; 0xb4\n",
- " bc: 23b6 movs r3, #182 ; 0xb6\n",
- " be: 23b8 movs r3, #184 ; 0xb8\n",
- " c0: 23ba movs r3, #186 ; 0xba\n",
- " c2: 23bc movs r3, #188 ; 0xbc\n",
- " c4: 23be movs r3, #190 ; 0xbe\n",
- " c6: 23c0 movs r3, #192 ; 0xc0\n",
- " c8: 23c2 movs r3, #194 ; 0xc2\n",
- " ca: 23c4 movs r3, #196 ; 0xc4\n",
- " cc: 23c6 movs r3, #198 ; 0xc6\n",
- " ce: 23c8 movs r3, #200 ; 0xc8\n",
- " d0: 23ca movs r3, #202 ; 0xca\n",
- " d2: 23cc movs r3, #204 ; 0xcc\n",
- " d4: 23ce movs r3, #206 ; 0xce\n",
- " d6: 23d0 movs r3, #208 ; 0xd0\n",
- " d8: 23d2 movs r3, #210 ; 0xd2\n",
- " da: 23d4 movs r3, #212 ; 0xd4\n",
- " dc: 23d6 movs r3, #214 ; 0xd6\n",
- " de: 23d8 movs r3, #216 ; 0xd8\n",
- " e0: 23da movs r3, #218 ; 0xda\n",
- " e2: 23dc movs r3, #220 ; 0xdc\n",
- " e4: 23de movs r3, #222 ; 0xde\n",
- " e6: 23e0 movs r3, #224 ; 0xe0\n",
- " e8: 23e2 movs r3, #226 ; 0xe2\n",
- " ea: 23e4 movs r3, #228 ; 0xe4\n",
- " ec: 23e6 movs r3, #230 ; 0xe6\n",
- " ee: 23e8 movs r3, #232 ; 0xe8\n",
- " f0: 23ea movs r3, #234 ; 0xea\n",
- " f2: 23ec movs r3, #236 ; 0xec\n",
- " f4: 23ee movs r3, #238 ; 0xee\n",
- " f6: 23f0 movs r3, #240 ; 0xf0\n",
- " f8: 23f2 movs r3, #242 ; 0xf2\n",
- " fa: 23f4 movs r3, #244 ; 0xf4\n",
- " fc: 23f6 movs r3, #246 ; 0xf6\n",
- " fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 100: 23fa movs r3, #250 ; 0xfa\n",
- " 102: 23fc movs r3, #252 ; 0xfc\n",
- " 104: 23fe movs r3, #254 ; 0xfe\n",
- " 106: 2300 movs r3, #0\n",
- " 108: 2302 movs r3, #2\n",
- " 10a: 2304 movs r3, #4\n",
- " 10c: 2306 movs r3, #6\n",
- " 10e: 2308 movs r3, #8\n",
- " 110: 230a movs r3, #10\n",
- " 112: 230c movs r3, #12\n",
- " 114: 230e movs r3, #14\n",
- " 116: 2310 movs r3, #16\n",
- " 118: 2312 movs r3, #18\n",
- " 11a: 2314 movs r3, #20\n",
- " 11c: 2316 movs r3, #22\n",
- " 11e: 2318 movs r3, #24\n",
- " 120: 231a movs r3, #26\n",
- " 122: 231c movs r3, #28\n",
- " 124: 231e movs r3, #30\n",
- " 126: 2320 movs r3, #32\n",
- " 128: 2322 movs r3, #34 ; 0x22\n",
- " 12a: 2324 movs r3, #36 ; 0x24\n",
- " 12c: 2326 movs r3, #38 ; 0x26\n",
- " 12e: 2328 movs r3, #40 ; 0x28\n",
- " 130: 232a movs r3, #42 ; 0x2a\n",
- " 132: 232c movs r3, #44 ; 0x2c\n",
- " 134: 232e movs r3, #46 ; 0x2e\n",
- " 136: 2330 movs r3, #48 ; 0x30\n",
- " 138: 2332 movs r3, #50 ; 0x32\n",
- " 13a: 2334 movs r3, #52 ; 0x34\n",
- " 13c: 2336 movs r3, #54 ; 0x36\n",
- " 13e: 2338 movs r3, #56 ; 0x38\n",
- " 140: 233a movs r3, #58 ; 0x3a\n",
- " 142: 233c movs r3, #60 ; 0x3c\n",
- " 144: 233e movs r3, #62 ; 0x3e\n",
- " 146: 2340 movs r3, #64 ; 0x40\n",
- " 148: 2342 movs r3, #66 ; 0x42\n",
- " 14a: 2344 movs r3, #68 ; 0x44\n",
- " 14c: 2346 movs r3, #70 ; 0x46\n",
- " 14e: 2348 movs r3, #72 ; 0x48\n",
- " 150: 234a movs r3, #74 ; 0x4a\n",
- " 152: 234c movs r3, #76 ; 0x4c\n",
- " 154: 234e movs r3, #78 ; 0x4e\n",
- " 156: 2350 movs r3, #80 ; 0x50\n",
- " 158: 2352 movs r3, #82 ; 0x52\n",
- " 15a: 2354 movs r3, #84 ; 0x54\n",
- " 15c: 2356 movs r3, #86 ; 0x56\n",
- " 15e: 2358 movs r3, #88 ; 0x58\n",
- " 160: 235a movs r3, #90 ; 0x5a\n",
- " 162: 235c movs r3, #92 ; 0x5c\n",
- " 164: 235e movs r3, #94 ; 0x5e\n",
- " 166: 2360 movs r3, #96 ; 0x60\n",
- " 168: 2362 movs r3, #98 ; 0x62\n",
- " 16a: 2364 movs r3, #100 ; 0x64\n",
- " 16c: 2366 movs r3, #102 ; 0x66\n",
- " 16e: 2368 movs r3, #104 ; 0x68\n",
- " 170: 236a movs r3, #106 ; 0x6a\n",
- " 172: 236c movs r3, #108 ; 0x6c\n",
- " 174: 236e movs r3, #110 ; 0x6e\n",
- " 176: 2370 movs r3, #112 ; 0x70\n",
- " 178: 2372 movs r3, #114 ; 0x72\n",
- " 17a: 2374 movs r3, #116 ; 0x74\n",
- " 17c: 2376 movs r3, #118 ; 0x76\n",
- " 17e: 2378 movs r3, #120 ; 0x78\n",
- " 180: 237a movs r3, #122 ; 0x7a\n",
- " 182: 237c movs r3, #124 ; 0x7c\n",
- " 184: 237e movs r3, #126 ; 0x7e\n",
- " 186: 2380 movs r3, #128 ; 0x80\n",
- " 188: 2382 movs r3, #130 ; 0x82\n",
- " 18a: 2384 movs r3, #132 ; 0x84\n",
- " 18c: 2386 movs r3, #134 ; 0x86\n",
- " 18e: 2388 movs r3, #136 ; 0x88\n",
- " 190: 238a movs r3, #138 ; 0x8a\n",
- " 192: 238c movs r3, #140 ; 0x8c\n",
- " 194: 238e movs r3, #142 ; 0x8e\n",
- " 196: 2390 movs r3, #144 ; 0x90\n",
- " 198: 2392 movs r3, #146 ; 0x92\n",
- " 19a: 2394 movs r3, #148 ; 0x94\n",
- " 19c: 2396 movs r3, #150 ; 0x96\n",
- " 19e: 2398 movs r3, #152 ; 0x98\n",
- " 1a0: 239a movs r3, #154 ; 0x9a\n",
- " 1a2: 239c movs r3, #156 ; 0x9c\n",
- " 1a4: 239e movs r3, #158 ; 0x9e\n",
- " 1a6: 23a0 movs r3, #160 ; 0xa0\n",
- " 1a8: 23a2 movs r3, #162 ; 0xa2\n",
- " 1aa: 23a4 movs r3, #164 ; 0xa4\n",
- " 1ac: 23a6 movs r3, #166 ; 0xa6\n",
- " 1ae: 23a8 movs r3, #168 ; 0xa8\n",
- " 1b0: 23aa movs r3, #170 ; 0xaa\n",
- " 1b2: 23ac movs r3, #172 ; 0xac\n",
- " 1b4: 23ae movs r3, #174 ; 0xae\n",
- " 1b6: 23b0 movs r3, #176 ; 0xb0\n",
- " 1b8: 23b2 movs r3, #178 ; 0xb2\n",
- " 1ba: 23b4 movs r3, #180 ; 0xb4\n",
- " 1bc: 23b6 movs r3, #182 ; 0xb6\n",
- " 1be: 23b8 movs r3, #184 ; 0xb8\n",
- " 1c0: 23ba movs r3, #186 ; 0xba\n",
- " 1c2: 23bc movs r3, #188 ; 0xbc\n",
- " 1c4: 23be movs r3, #190 ; 0xbe\n",
- " 1c6: 23c0 movs r3, #192 ; 0xc0\n",
- " 1c8: 23c2 movs r3, #194 ; 0xc2\n",
- " 1ca: 23c4 movs r3, #196 ; 0xc4\n",
- " 1cc: 23c6 movs r3, #198 ; 0xc6\n",
- " 1ce: 23c8 movs r3, #200 ; 0xc8\n",
- " 1d0: 23ca movs r3, #202 ; 0xca\n",
- " 1d2: 23cc movs r3, #204 ; 0xcc\n",
- " 1d4: 23ce movs r3, #206 ; 0xce\n",
- " 1d6: 23d0 movs r3, #208 ; 0xd0\n",
- " 1d8: 23d2 movs r3, #210 ; 0xd2\n",
- " 1da: 23d4 movs r3, #212 ; 0xd4\n",
- " 1dc: 23d6 movs r3, #214 ; 0xd6\n",
- " 1de: 23d8 movs r3, #216 ; 0xd8\n",
- " 1e0: 23da movs r3, #218 ; 0xda\n",
- " 1e2: 23dc movs r3, #220 ; 0xdc\n",
- " 1e4: 23de movs r3, #222 ; 0xde\n",
- " 1e6: 23e0 movs r3, #224 ; 0xe0\n",
- " 1e8: 23e2 movs r3, #226 ; 0xe2\n",
- " 1ea: 23e4 movs r3, #228 ; 0xe4\n",
- " 1ec: 23e6 movs r3, #230 ; 0xe6\n",
- " 1ee: 23e8 movs r3, #232 ; 0xe8\n",
- " 1f0: 23ea movs r3, #234 ; 0xea\n",
- " 1f2: 23ec movs r3, #236 ; 0xec\n",
- " 1f4: 23ee movs r3, #238 ; 0xee\n",
- " 1f6: 23f0 movs r3, #240 ; 0xf0\n",
- " 1f8: 23f2 movs r3, #242 ; 0xf2\n",
- " 1fa: 23f4 movs r3, #244 ; 0xf4\n",
- " 1fc: 23f6 movs r3, #246 ; 0xf6\n",
- " 1fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 200: 23fa movs r3, #250 ; 0xfa\n",
- " 202: 23fc movs r3, #252 ; 0xfc\n",
- " 204: 23fe movs r3, #254 ; 0xfe\n",
- " 206: 2300 movs r3, #0\n",
- " 208: 2302 movs r3, #2\n",
- " 20a: 2304 movs r3, #4\n",
- " 20c: 2306 movs r3, #6\n",
- " 20e: 2308 movs r3, #8\n",
- " 210: 230a movs r3, #10\n",
- " 212: 230c movs r3, #12\n",
- " 214: 230e movs r3, #14\n",
- " 216: 2310 movs r3, #16\n",
- " 218: 2312 movs r3, #18\n",
- " 21a: 2314 movs r3, #20\n",
- " 21c: 2316 movs r3, #22\n",
- " 21e: 2318 movs r3, #24\n",
- " 220: 231a movs r3, #26\n",
- " 222: 231c movs r3, #28\n",
- " 224: 231e movs r3, #30\n",
- " 226: 2320 movs r3, #32\n",
- " 228: 2322 movs r3, #34 ; 0x22\n",
- " 22a: 2324 movs r3, #36 ; 0x24\n",
- " 22c: 2326 movs r3, #38 ; 0x26\n",
- " 22e: 2328 movs r3, #40 ; 0x28\n",
- " 230: 232a movs r3, #42 ; 0x2a\n",
- " 232: 232c movs r3, #44 ; 0x2c\n",
- " 234: 232e movs r3, #46 ; 0x2e\n",
- " 236: 2330 movs r3, #48 ; 0x30\n",
- " 238: 2332 movs r3, #50 ; 0x32\n",
- " 23a: 2334 movs r3, #52 ; 0x34\n",
- " 23c: 2336 movs r3, #54 ; 0x36\n",
- " 23e: 2338 movs r3, #56 ; 0x38\n",
- " 240: 233a movs r3, #58 ; 0x3a\n",
- " 242: 233c movs r3, #60 ; 0x3c\n",
- " 244: 233e movs r3, #62 ; 0x3e\n",
- " 246: 2340 movs r3, #64 ; 0x40\n",
- " 248: 2342 movs r3, #66 ; 0x42\n",
- " 24a: 2344 movs r3, #68 ; 0x44\n",
- " 24c: 2346 movs r3, #70 ; 0x46\n",
- " 24e: 2348 movs r3, #72 ; 0x48\n",
- " 250: 234a movs r3, #74 ; 0x4a\n",
- " 252: 234c movs r3, #76 ; 0x4c\n",
- " 254: 234e movs r3, #78 ; 0x4e\n",
- " 256: 2350 movs r3, #80 ; 0x50\n",
- " 258: 2352 movs r3, #82 ; 0x52\n",
- " 25a: 2354 movs r3, #84 ; 0x54\n",
- " 25c: 2356 movs r3, #86 ; 0x56\n",
- " 25e: 2358 movs r3, #88 ; 0x58\n",
- " 260: 235a movs r3, #90 ; 0x5a\n",
- " 262: 235c movs r3, #92 ; 0x5c\n",
- " 264: 235e movs r3, #94 ; 0x5e\n",
- " 266: 2360 movs r3, #96 ; 0x60\n",
- " 268: 2362 movs r3, #98 ; 0x62\n",
- " 26a: 2364 movs r3, #100 ; 0x64\n",
- " 26c: 2366 movs r3, #102 ; 0x66\n",
- " 26e: 2368 movs r3, #104 ; 0x68\n",
- " 270: 236a movs r3, #106 ; 0x6a\n",
- " 272: 236c movs r3, #108 ; 0x6c\n",
- " 274: 236e movs r3, #110 ; 0x6e\n",
- " 276: 2370 movs r3, #112 ; 0x70\n",
- " 278: 2372 movs r3, #114 ; 0x72\n",
- " 27a: 2374 movs r3, #116 ; 0x74\n",
- " 27c: 2376 movs r3, #118 ; 0x76\n",
- " 27e: 2378 movs r3, #120 ; 0x78\n",
- " 280: 237a movs r3, #122 ; 0x7a\n",
- " 282: 237c movs r3, #124 ; 0x7c\n",
- " 284: 237e movs r3, #126 ; 0x7e\n",
- " 286: 2380 movs r3, #128 ; 0x80\n",
- " 288: 2382 movs r3, #130 ; 0x82\n",
- " 28a: 2384 movs r3, #132 ; 0x84\n",
- " 28c: 2386 movs r3, #134 ; 0x86\n",
- " 28e: 2388 movs r3, #136 ; 0x88\n",
- " 290: 238a movs r3, #138 ; 0x8a\n",
- " 292: 238c movs r3, #140 ; 0x8c\n",
- " 294: 238e movs r3, #142 ; 0x8e\n",
- " 296: 2390 movs r3, #144 ; 0x90\n",
- " 298: 2392 movs r3, #146 ; 0x92\n",
- " 29a: 2394 movs r3, #148 ; 0x94\n",
- " 29c: 2396 movs r3, #150 ; 0x96\n",
- " 29e: 2398 movs r3, #152 ; 0x98\n",
- " 2a0: 239a movs r3, #154 ; 0x9a\n",
- " 2a2: 239c movs r3, #156 ; 0x9c\n",
- " 2a4: 239e movs r3, #158 ; 0x9e\n",
- " 2a6: 23a0 movs r3, #160 ; 0xa0\n",
- " 2a8: 23a2 movs r3, #162 ; 0xa2\n",
- " 2aa: 23a4 movs r3, #164 ; 0xa4\n",
- " 2ac: 23a6 movs r3, #166 ; 0xa6\n",
- " 2ae: 23a8 movs r3, #168 ; 0xa8\n",
- " 2b0: 23aa movs r3, #170 ; 0xaa\n",
- " 2b2: 23ac movs r3, #172 ; 0xac\n",
- " 2b4: 23ae movs r3, #174 ; 0xae\n",
- " 2b6: 23b0 movs r3, #176 ; 0xb0\n",
- " 2b8: 23b2 movs r3, #178 ; 0xb2\n",
- " 2ba: 23b4 movs r3, #180 ; 0xb4\n",
- " 2bc: 23b6 movs r3, #182 ; 0xb6\n",
- " 2be: 23b8 movs r3, #184 ; 0xb8\n",
- " 2c0: 23ba movs r3, #186 ; 0xba\n",
- " 2c2: 23bc movs r3, #188 ; 0xbc\n",
- " 2c4: 23be movs r3, #190 ; 0xbe\n",
- " 2c6: 23c0 movs r3, #192 ; 0xc0\n",
- " 2c8: 23c2 movs r3, #194 ; 0xc2\n",
- " 2ca: 23c4 movs r3, #196 ; 0xc4\n",
- " 2cc: 23c6 movs r3, #198 ; 0xc6\n",
- " 2ce: 23c8 movs r3, #200 ; 0xc8\n",
- " 2d0: 23ca movs r3, #202 ; 0xca\n",
- " 2d2: 23cc movs r3, #204 ; 0xcc\n",
- " 2d4: 23ce movs r3, #206 ; 0xce\n",
- " 2d6: 23d0 movs r3, #208 ; 0xd0\n",
- " 2d8: 23d2 movs r3, #210 ; 0xd2\n",
- " 2da: 23d4 movs r3, #212 ; 0xd4\n",
- " 2dc: 23d6 movs r3, #214 ; 0xd6\n",
- " 2de: 23d8 movs r3, #216 ; 0xd8\n",
- " 2e0: 23da movs r3, #218 ; 0xda\n",
- " 2e2: 23dc movs r3, #220 ; 0xdc\n",
- " 2e4: 23de movs r3, #222 ; 0xde\n",
- " 2e6: 23e0 movs r3, #224 ; 0xe0\n",
- " 2e8: 23e2 movs r3, #226 ; 0xe2\n",
- " 2ea: 23e4 movs r3, #228 ; 0xe4\n",
- " 2ec: 23e6 movs r3, #230 ; 0xe6\n",
- " 2ee: 23e8 movs r3, #232 ; 0xe8\n",
- " 2f0: 23ea movs r3, #234 ; 0xea\n",
- " 2f2: 23ec movs r3, #236 ; 0xec\n",
- " 2f4: 23ee movs r3, #238 ; 0xee\n",
- " 2f6: 23f0 movs r3, #240 ; 0xf0\n",
- " 2f8: 23f2 movs r3, #242 ; 0xf2\n",
- " 2fa: 23f4 movs r3, #244 ; 0xf4\n",
- " 2fc: 23f6 movs r3, #246 ; 0xf6\n",
- " 2fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 300: 23fa movs r3, #250 ; 0xfa\n",
- " 302: 23fc movs r3, #252 ; 0xfc\n",
- " 304: 23fe movs r3, #254 ; 0xfe\n",
- " 306: 2300 movs r3, #0\n",
- " 308: 2302 movs r3, #2\n",
- " 30a: 2304 movs r3, #4\n",
- " 30c: 2306 movs r3, #6\n",
- " 30e: 2308 movs r3, #8\n",
- " 310: 230a movs r3, #10\n",
- " 312: 230c movs r3, #12\n",
- " 314: 230e movs r3, #14\n",
- " 316: 2310 movs r3, #16\n",
- " 318: 2312 movs r3, #18\n",
- " 31a: 2314 movs r3, #20\n",
- " 31c: 2316 movs r3, #22\n",
- " 31e: 2318 movs r3, #24\n",
- " 320: 231a movs r3, #26\n",
- " 322: 231c movs r3, #28\n",
- " 324: 231e movs r3, #30\n",
- " 326: 2320 movs r3, #32\n",
- " 328: 2322 movs r3, #34 ; 0x22\n",
- " 32a: 2324 movs r3, #36 ; 0x24\n",
- " 32c: 2326 movs r3, #38 ; 0x26\n",
- " 32e: 2328 movs r3, #40 ; 0x28\n",
- " 330: 232a movs r3, #42 ; 0x2a\n",
- " 332: 232c movs r3, #44 ; 0x2c\n",
- " 334: 232e movs r3, #46 ; 0x2e\n",
- " 336: 2330 movs r3, #48 ; 0x30\n",
- " 338: 2332 movs r3, #50 ; 0x32\n",
- " 33a: 2334 movs r3, #52 ; 0x34\n",
- " 33c: 2336 movs r3, #54 ; 0x36\n",
- " 33e: 2338 movs r3, #56 ; 0x38\n",
- " 340: 233a movs r3, #58 ; 0x3a\n",
- " 342: 233c movs r3, #60 ; 0x3c\n",
- " 344: 233e movs r3, #62 ; 0x3e\n",
- " 346: 2340 movs r3, #64 ; 0x40\n",
- " 348: 2342 movs r3, #66 ; 0x42\n",
- " 34a: 2344 movs r3, #68 ; 0x44\n",
- " 34c: 2346 movs r3, #70 ; 0x46\n",
- " 34e: 2348 movs r3, #72 ; 0x48\n",
- " 350: 234a movs r3, #74 ; 0x4a\n",
- " 352: 234c movs r3, #76 ; 0x4c\n",
- " 354: 234e movs r3, #78 ; 0x4e\n",
- " 356: 2350 movs r3, #80 ; 0x50\n",
- " 358: 2352 movs r3, #82 ; 0x52\n",
- " 35a: 2354 movs r3, #84 ; 0x54\n",
- " 35c: 2356 movs r3, #86 ; 0x56\n",
- " 35e: 2358 movs r3, #88 ; 0x58\n",
- " 360: 235a movs r3, #90 ; 0x5a\n",
- " 362: 235c movs r3, #92 ; 0x5c\n",
- " 364: 235e movs r3, #94 ; 0x5e\n",
- " 366: 2360 movs r3, #96 ; 0x60\n",
- " 368: 2362 movs r3, #98 ; 0x62\n",
- " 36a: 2364 movs r3, #100 ; 0x64\n",
- " 36c: 2366 movs r3, #102 ; 0x66\n",
- " 36e: 2368 movs r3, #104 ; 0x68\n",
- " 370: 236a movs r3, #106 ; 0x6a\n",
- " 372: 236c movs r3, #108 ; 0x6c\n",
- " 374: 236e movs r3, #110 ; 0x6e\n",
- " 376: 2370 movs r3, #112 ; 0x70\n",
- " 378: 2372 movs r3, #114 ; 0x72\n",
- " 37a: 2374 movs r3, #116 ; 0x74\n",
- " 37c: 2376 movs r3, #118 ; 0x76\n",
- " 37e: 2378 movs r3, #120 ; 0x78\n",
- " 380: 237a movs r3, #122 ; 0x7a\n",
- " 382: 237c movs r3, #124 ; 0x7c\n",
- " 384: 237e movs r3, #126 ; 0x7e\n",
- " 386: 2380 movs r3, #128 ; 0x80\n",
- " 388: 2382 movs r3, #130 ; 0x82\n",
- " 38a: 2384 movs r3, #132 ; 0x84\n",
- " 38c: 2386 movs r3, #134 ; 0x86\n",
- " 38e: 2388 movs r3, #136 ; 0x88\n",
- " 390: 238a movs r3, #138 ; 0x8a\n",
- " 392: 238c movs r3, #140 ; 0x8c\n",
- " 394: 238e movs r3, #142 ; 0x8e\n",
- " 396: 2390 movs r3, #144 ; 0x90\n",
- " 398: 2392 movs r3, #146 ; 0x92\n",
- " 39a: 2394 movs r3, #148 ; 0x94\n",
- " 39c: 2396 movs r3, #150 ; 0x96\n",
- " 39e: 2398 movs r3, #152 ; 0x98\n",
- " 3a0: 239a movs r3, #154 ; 0x9a\n",
- " 3a2: 239c movs r3, #156 ; 0x9c\n",
- " 3a4: 239e movs r3, #158 ; 0x9e\n",
- " 3a6: 23a0 movs r3, #160 ; 0xa0\n",
- " 3a8: 23a2 movs r3, #162 ; 0xa2\n",
- " 3aa: 23a4 movs r3, #164 ; 0xa4\n",
- " 3ac: 23a6 movs r3, #166 ; 0xa6\n",
- " 3ae: 23a8 movs r3, #168 ; 0xa8\n",
- " 3b0: 23aa movs r3, #170 ; 0xaa\n",
- " 3b2: 23ac movs r3, #172 ; 0xac\n",
- " 3b4: 23ae movs r3, #174 ; 0xae\n",
- " 3b6: 23b0 movs r3, #176 ; 0xb0\n",
- " 3b8: 23b2 movs r3, #178 ; 0xb2\n",
- " 3ba: 23b4 movs r3, #180 ; 0xb4\n",
- " 3bc: 23b6 movs r3, #182 ; 0xb6\n",
- " 3be: 23b8 movs r3, #184 ; 0xb8\n",
- " 3c0: 23ba movs r3, #186 ; 0xba\n",
- " 3c2: 23bc movs r3, #188 ; 0xbc\n",
- " 3c4: 23be movs r3, #190 ; 0xbe\n",
- " 3c6: 23c0 movs r3, #192 ; 0xc0\n",
- " 3c8: 23c2 movs r3, #194 ; 0xc2\n",
- " 3ca: 23c4 movs r3, #196 ; 0xc4\n",
- " 3cc: 23c6 movs r3, #198 ; 0xc6\n",
- " 3ce: 23c8 movs r3, #200 ; 0xc8\n",
- " 3d0: 23ca movs r3, #202 ; 0xca\n",
- " 3d2: 23cc movs r3, #204 ; 0xcc\n",
- " 3d4: 23ce movs r3, #206 ; 0xce\n",
- " 3d6: 23d0 movs r3, #208 ; 0xd0\n",
- " 3d8: 23d2 movs r3, #210 ; 0xd2\n",
- " 3da: 23d4 movs r3, #212 ; 0xd4\n",
- " 3dc: 23d6 movs r3, #214 ; 0xd6\n",
- " 3de: 23d8 movs r3, #216 ; 0xd8\n",
- " 3e0: 23da movs r3, #218 ; 0xda\n",
- " 3e2: 23dc movs r3, #220 ; 0xdc\n",
- " 3e4: 23de movs r3, #222 ; 0xde\n",
- " 3e6: 23e0 movs r3, #224 ; 0xe0\n",
- " 3e8: 23e2 movs r3, #226 ; 0xe2\n",
- " 3ea: 23e4 movs r3, #228 ; 0xe4\n",
- " 3ec: 23e6 movs r3, #230 ; 0xe6\n",
- " 3ee: 23e8 movs r3, #232 ; 0xe8\n",
- " 3f0: 23ea movs r3, #234 ; 0xea\n",
- " 3f2: 23ec movs r3, #236 ; 0xec\n",
- " 3f4: 23ee movs r3, #238 ; 0xee\n",
- " 3f6: 23f0 movs r3, #240 ; 0xf0\n",
- " 3f8: 23f2 movs r3, #242 ; 0xf2\n",
- " 3fa: 23f4 movs r3, #244 ; 0xf4\n",
- " 3fc: 23f6 movs r3, #246 ; 0xf6\n",
- " 3fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 400: 23fa movs r3, #250 ; 0xfa\n",
- " 402: 23fc movs r3, #252 ; 0xfc\n",
- " 404: 23fe movs r3, #254 ; 0xfe\n",
- " 406: 2300 movs r3, #0\n",
- " 408: 2302 movs r3, #2\n",
- " 40a: 2304 movs r3, #4\n",
- " 40c: 2306 movs r3, #6\n",
- " 40e: 2308 movs r3, #8\n",
- " 410: 230a movs r3, #10\n",
- " 412: 230c movs r3, #12\n",
- " 414: 230e movs r3, #14\n",
- " 416: 2310 movs r3, #16\n",
- " 418: 2312 movs r3, #18\n",
- " 41a: 2314 movs r3, #20\n",
- " 41c: 2316 movs r3, #22\n",
- " 41e: 2318 movs r3, #24\n",
- " 420: 231a movs r3, #26\n",
- " 422: 231c movs r3, #28\n",
- " 424: 231e movs r3, #30\n",
- " 426: 2320 movs r3, #32\n",
- " 428: 2322 movs r3, #34 ; 0x22\n",
- " 42a: 2324 movs r3, #36 ; 0x24\n",
- " 42c: 2326 movs r3, #38 ; 0x26\n",
- " 42e: 2328 movs r3, #40 ; 0x28\n",
- " 430: 232a movs r3, #42 ; 0x2a\n",
- " 432: 232c movs r3, #44 ; 0x2c\n",
- " 434: 232e movs r3, #46 ; 0x2e\n",
- " 436: 2330 movs r3, #48 ; 0x30\n",
- " 438: 2332 movs r3, #50 ; 0x32\n",
- " 43a: 2334 movs r3, #52 ; 0x34\n",
- " 43c: 2336 movs r3, #54 ; 0x36\n",
- " 43e: 2338 movs r3, #56 ; 0x38\n",
- " 440: 233a movs r3, #58 ; 0x3a\n",
- " 442: 233c movs r3, #60 ; 0x3c\n",
- " 444: 233e movs r3, #62 ; 0x3e\n",
- " 446: 2340 movs r3, #64 ; 0x40\n",
- " 448: 2342 movs r3, #66 ; 0x42\n",
- " 44a: 2344 movs r3, #68 ; 0x44\n",
- " 44c: 2346 movs r3, #70 ; 0x46\n",
- " 44e: 2348 movs r3, #72 ; 0x48\n",
- " 450: 234a movs r3, #74 ; 0x4a\n",
- " 452: 234c movs r3, #76 ; 0x4c\n",
- " 454: 234e movs r3, #78 ; 0x4e\n",
- " 456: 2350 movs r3, #80 ; 0x50\n",
- " 458: 2352 movs r3, #82 ; 0x52\n",
- " 45a: 2354 movs r3, #84 ; 0x54\n",
- " 45c: 2356 movs r3, #86 ; 0x56\n",
- " 45e: 2358 movs r3, #88 ; 0x58\n",
- " 460: 235a movs r3, #90 ; 0x5a\n",
- " 462: 235c movs r3, #92 ; 0x5c\n",
- " 464: 235e movs r3, #94 ; 0x5e\n",
- " 466: 2360 movs r3, #96 ; 0x60\n",
- " 468: 2362 movs r3, #98 ; 0x62\n",
- " 46a: 2364 movs r3, #100 ; 0x64\n",
- " 46c: 2366 movs r3, #102 ; 0x66\n",
- " 46e: 2368 movs r3, #104 ; 0x68\n",
- " 470: 236a movs r3, #106 ; 0x6a\n",
- " 472: 236c movs r3, #108 ; 0x6c\n",
- " 474: 236e movs r3, #110 ; 0x6e\n",
- " 476: 2370 movs r3, #112 ; 0x70\n",
- " 478: 2372 movs r3, #114 ; 0x72\n",
- " 47a: 2374 movs r3, #116 ; 0x74\n",
- " 47c: 2376 movs r3, #118 ; 0x76\n",
- " 47e: 2378 movs r3, #120 ; 0x78\n",
- " 480: 237a movs r3, #122 ; 0x7a\n",
- " 482: 237c movs r3, #124 ; 0x7c\n",
- " 484: 237e movs r3, #126 ; 0x7e\n",
- " 486: 2380 movs r3, #128 ; 0x80\n",
- " 488: 2382 movs r3, #130 ; 0x82\n",
- " 48a: 2384 movs r3, #132 ; 0x84\n",
- " 48c: 2386 movs r3, #134 ; 0x86\n",
- " 48e: 2388 movs r3, #136 ; 0x88\n",
- " 490: 238a movs r3, #138 ; 0x8a\n",
- " 492: 238c movs r3, #140 ; 0x8c\n",
- " 494: 238e movs r3, #142 ; 0x8e\n",
- " 496: 2390 movs r3, #144 ; 0x90\n",
- " 498: 2392 movs r3, #146 ; 0x92\n",
- " 49a: 2394 movs r3, #148 ; 0x94\n",
- " 49c: 2396 movs r3, #150 ; 0x96\n",
- " 49e: 2398 movs r3, #152 ; 0x98\n",
- " 4a0: 239a movs r3, #154 ; 0x9a\n",
- " 4a2: 239c movs r3, #156 ; 0x9c\n",
- " 4a4: 239e movs r3, #158 ; 0x9e\n",
- " 4a6: 23a0 movs r3, #160 ; 0xa0\n",
- " 4a8: 23a2 movs r3, #162 ; 0xa2\n",
- " 4aa: 23a4 movs r3, #164 ; 0xa4\n",
- " 4ac: 23a6 movs r3, #166 ; 0xa6\n",
- " 4ae: 23a8 movs r3, #168 ; 0xa8\n",
- " 4b0: 23aa movs r3, #170 ; 0xaa\n",
- " 4b2: 23ac movs r3, #172 ; 0xac\n",
- " 4b4: 23ae movs r3, #174 ; 0xae\n",
- " 4b6: 23b0 movs r3, #176 ; 0xb0\n",
- " 4b8: 23b2 movs r3, #178 ; 0xb2\n",
- " 4ba: 23b4 movs r3, #180 ; 0xb4\n",
- " 4bc: 23b6 movs r3, #182 ; 0xb6\n",
- " 4be: 23b8 movs r3, #184 ; 0xb8\n",
- " 4c0: 23ba movs r3, #186 ; 0xba\n",
- " 4c2: 23bc movs r3, #188 ; 0xbc\n",
- " 4c4: 23be movs r3, #190 ; 0xbe\n",
- " 4c6: 23c0 movs r3, #192 ; 0xc0\n",
- " 4c8: 23c2 movs r3, #194 ; 0xc2\n",
- " 4ca: 23c4 movs r3, #196 ; 0xc4\n",
- " 4cc: 23c6 movs r3, #198 ; 0xc6\n",
- " 4ce: 23c8 movs r3, #200 ; 0xc8\n",
- " 4d0: 23ca movs r3, #202 ; 0xca\n",
- " 4d2: 23cc movs r3, #204 ; 0xcc\n",
- " 4d4: 23ce movs r3, #206 ; 0xce\n",
- " 4d6: 23d0 movs r3, #208 ; 0xd0\n",
- " 4d8: 23d2 movs r3, #210 ; 0xd2\n",
- " 4da: 23d4 movs r3, #212 ; 0xd4\n",
- " 4dc: 23d6 movs r3, #214 ; 0xd6\n",
- " 4de: 23d8 movs r3, #216 ; 0xd8\n",
- " 4e0: 23da movs r3, #218 ; 0xda\n",
- " 4e2: 23dc movs r3, #220 ; 0xdc\n",
- " 4e4: 23de movs r3, #222 ; 0xde\n",
- " 4e6: 23e0 movs r3, #224 ; 0xe0\n",
- " 4e8: 23e2 movs r3, #226 ; 0xe2\n",
- " 4ea: 23e4 movs r3, #228 ; 0xe4\n",
- " 4ec: 23e6 movs r3, #230 ; 0xe6\n",
- " 4ee: 23e8 movs r3, #232 ; 0xe8\n",
- " 4f0: 23ea movs r3, #234 ; 0xea\n",
- " 4f2: 23ec movs r3, #236 ; 0xec\n",
- " 4f4: 23ee movs r3, #238 ; 0xee\n",
- " 4f6: 23f0 movs r3, #240 ; 0xf0\n",
- " 4f8: 23f2 movs r3, #242 ; 0xf2\n",
- " 4fa: 23f4 movs r3, #244 ; 0xf4\n",
- " 4fc: 23f6 movs r3, #246 ; 0xf6\n",
- " 4fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 500: 23fa movs r3, #250 ; 0xfa\n",
- " 502: 23fc movs r3, #252 ; 0xfc\n",
- " 504: 23fe movs r3, #254 ; 0xfe\n",
- " 506: 2300 movs r3, #0\n",
- " 508: 2302 movs r3, #2\n",
- " 50a: 2304 movs r3, #4\n",
- " 50c: 2306 movs r3, #6\n",
- " 50e: 2308 movs r3, #8\n",
- " 510: 230a movs r3, #10\n",
- " 512: 230c movs r3, #12\n",
- " 514: 230e movs r3, #14\n",
- " 516: 2310 movs r3, #16\n",
- " 518: 2312 movs r3, #18\n",
- " 51a: 2314 movs r3, #20\n",
- " 51c: 2316 movs r3, #22\n",
- " 51e: 2318 movs r3, #24\n",
- " 520: 231a movs r3, #26\n",
- " 522: 231c movs r3, #28\n",
- " 524: 231e movs r3, #30\n",
- " 526: 2320 movs r3, #32\n",
- " 528: 2322 movs r3, #34 ; 0x22\n",
- " 52a: 2324 movs r3, #36 ; 0x24\n",
- " 52c: 2326 movs r3, #38 ; 0x26\n",
- " 52e: 2328 movs r3, #40 ; 0x28\n",
- " 530: 232a movs r3, #42 ; 0x2a\n",
- " 532: 232c movs r3, #44 ; 0x2c\n",
- " 534: 232e movs r3, #46 ; 0x2e\n",
- " 536: 2330 movs r3, #48 ; 0x30\n",
- " 538: 2332 movs r3, #50 ; 0x32\n",
- " 53a: 2334 movs r3, #52 ; 0x34\n",
- " 53c: 2336 movs r3, #54 ; 0x36\n",
- " 53e: 2338 movs r3, #56 ; 0x38\n",
- " 540: 233a movs r3, #58 ; 0x3a\n",
- " 542: 233c movs r3, #60 ; 0x3c\n",
- " 544: 233e movs r3, #62 ; 0x3e\n",
- " 546: 2340 movs r3, #64 ; 0x40\n",
- " 548: 2342 movs r3, #66 ; 0x42\n",
- " 54a: 2344 movs r3, #68 ; 0x44\n",
- " 54c: 2346 movs r3, #70 ; 0x46\n",
- " 54e: 2348 movs r3, #72 ; 0x48\n",
- " 550: 234a movs r3, #74 ; 0x4a\n",
- " 552: 234c movs r3, #76 ; 0x4c\n",
- " 554: 234e movs r3, #78 ; 0x4e\n",
- " 556: 2350 movs r3, #80 ; 0x50\n",
- " 558: 2352 movs r3, #82 ; 0x52\n",
- " 55a: 2354 movs r3, #84 ; 0x54\n",
- " 55c: 2356 movs r3, #86 ; 0x56\n",
- " 55e: 2358 movs r3, #88 ; 0x58\n",
- " 560: 235a movs r3, #90 ; 0x5a\n",
- " 562: 235c movs r3, #92 ; 0x5c\n",
- " 564: 235e movs r3, #94 ; 0x5e\n",
- " 566: 2360 movs r3, #96 ; 0x60\n",
- " 568: 2362 movs r3, #98 ; 0x62\n",
- " 56a: 2364 movs r3, #100 ; 0x64\n",
- " 56c: 2366 movs r3, #102 ; 0x66\n",
- " 56e: 2368 movs r3, #104 ; 0x68\n",
- " 570: 236a movs r3, #106 ; 0x6a\n",
- " 572: 236c movs r3, #108 ; 0x6c\n",
- " 574: 236e movs r3, #110 ; 0x6e\n",
- " 576: 2370 movs r3, #112 ; 0x70\n",
- " 578: 2372 movs r3, #114 ; 0x72\n",
- " 57a: 2374 movs r3, #116 ; 0x74\n",
- " 57c: 2376 movs r3, #118 ; 0x76\n",
- " 57e: 2378 movs r3, #120 ; 0x78\n",
- " 580: 237a movs r3, #122 ; 0x7a\n",
- " 582: 237c movs r3, #124 ; 0x7c\n",
- " 584: 237e movs r3, #126 ; 0x7e\n",
- " 586: 2380 movs r3, #128 ; 0x80\n",
- " 588: 2382 movs r3, #130 ; 0x82\n",
- " 58a: 2384 movs r3, #132 ; 0x84\n",
- " 58c: 2386 movs r3, #134 ; 0x86\n",
- " 58e: 2388 movs r3, #136 ; 0x88\n",
- " 590: 238a movs r3, #138 ; 0x8a\n",
- " 592: 238c movs r3, #140 ; 0x8c\n",
- " 594: 238e movs r3, #142 ; 0x8e\n",
- " 596: 2390 movs r3, #144 ; 0x90\n",
- " 598: 2392 movs r3, #146 ; 0x92\n",
- " 59a: 2394 movs r3, #148 ; 0x94\n",
- " 59c: 2396 movs r3, #150 ; 0x96\n",
- " 59e: 2398 movs r3, #152 ; 0x98\n",
- " 5a0: 239a movs r3, #154 ; 0x9a\n",
- " 5a2: 239c movs r3, #156 ; 0x9c\n",
- " 5a4: 239e movs r3, #158 ; 0x9e\n",
- " 5a6: 23a0 movs r3, #160 ; 0xa0\n",
- " 5a8: 23a2 movs r3, #162 ; 0xa2\n",
- " 5aa: 23a4 movs r3, #164 ; 0xa4\n",
- " 5ac: 23a6 movs r3, #166 ; 0xa6\n",
- " 5ae: 23a8 movs r3, #168 ; 0xa8\n",
- " 5b0: 23aa movs r3, #170 ; 0xaa\n",
- " 5b2: 23ac movs r3, #172 ; 0xac\n",
- " 5b4: 23ae movs r3, #174 ; 0xae\n",
- " 5b6: 23b0 movs r3, #176 ; 0xb0\n",
- " 5b8: 23b2 movs r3, #178 ; 0xb2\n",
- " 5ba: 23b4 movs r3, #180 ; 0xb4\n",
- " 5bc: 23b6 movs r3, #182 ; 0xb6\n",
- " 5be: 23b8 movs r3, #184 ; 0xb8\n",
- " 5c0: 23ba movs r3, #186 ; 0xba\n",
- " 5c2: 23bc movs r3, #188 ; 0xbc\n",
- " 5c4: 23be movs r3, #190 ; 0xbe\n",
- " 5c6: 23c0 movs r3, #192 ; 0xc0\n",
- " 5c8: 23c2 movs r3, #194 ; 0xc2\n",
- " 5ca: 23c4 movs r3, #196 ; 0xc4\n",
- " 5cc: 23c6 movs r3, #198 ; 0xc6\n",
- " 5ce: 23c8 movs r3, #200 ; 0xc8\n",
- " 5d0: 23ca movs r3, #202 ; 0xca\n",
- " 5d2: 23cc movs r3, #204 ; 0xcc\n",
- " 5d4: 23ce movs r3, #206 ; 0xce\n",
- " 5d6: 23d0 movs r3, #208 ; 0xd0\n",
- " 5d8: 23d2 movs r3, #210 ; 0xd2\n",
- " 5da: 23d4 movs r3, #212 ; 0xd4\n",
- " 5dc: 23d6 movs r3, #214 ; 0xd6\n",
- " 5de: 23d8 movs r3, #216 ; 0xd8\n",
- " 5e0: 23da movs r3, #218 ; 0xda\n",
- " 5e2: 23dc movs r3, #220 ; 0xdc\n",
- " 5e4: 23de movs r3, #222 ; 0xde\n",
- " 5e6: 23e0 movs r3, #224 ; 0xe0\n",
- " 5e8: 23e2 movs r3, #226 ; 0xe2\n",
- " 5ea: 23e4 movs r3, #228 ; 0xe4\n",
- " 5ec: 23e6 movs r3, #230 ; 0xe6\n",
- " 5ee: 23e8 movs r3, #232 ; 0xe8\n",
- " 5f0: 23ea movs r3, #234 ; 0xea\n",
- " 5f2: 23ec movs r3, #236 ; 0xec\n",
- " 5f4: 23ee movs r3, #238 ; 0xee\n",
- " 5f6: 23f0 movs r3, #240 ; 0xf0\n",
- " 5f8: 23f2 movs r3, #242 ; 0xf2\n",
- " 5fa: 23f4 movs r3, #244 ; 0xf4\n",
- " 5fc: 23f6 movs r3, #246 ; 0xf6\n",
- " 5fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 600: 23fa movs r3, #250 ; 0xfa\n",
- " 602: 23fc movs r3, #252 ; 0xfc\n",
- " 604: 23fe movs r3, #254 ; 0xfe\n",
- " 606: 2300 movs r3, #0\n",
- " 608: 2302 movs r3, #2\n",
- " 60a: 2304 movs r3, #4\n",
- " 60c: 2306 movs r3, #6\n",
- " 60e: 2308 movs r3, #8\n",
- " 610: 230a movs r3, #10\n",
- " 612: 230c movs r3, #12\n",
- " 614: 230e movs r3, #14\n",
- " 616: 2310 movs r3, #16\n",
- " 618: 2312 movs r3, #18\n",
- " 61a: 2314 movs r3, #20\n",
- " 61c: 2316 movs r3, #22\n",
- " 61e: 2318 movs r3, #24\n",
- " 620: 231a movs r3, #26\n",
- " 622: 231c movs r3, #28\n",
- " 624: 231e movs r3, #30\n",
- " 626: 2320 movs r3, #32\n",
- " 628: 2322 movs r3, #34 ; 0x22\n",
- " 62a: 2324 movs r3, #36 ; 0x24\n",
- " 62c: 2326 movs r3, #38 ; 0x26\n",
- " 62e: 2328 movs r3, #40 ; 0x28\n",
- " 630: 232a movs r3, #42 ; 0x2a\n",
- " 632: 232c movs r3, #44 ; 0x2c\n",
- " 634: 232e movs r3, #46 ; 0x2e\n",
- " 636: 2330 movs r3, #48 ; 0x30\n",
- " 638: 2332 movs r3, #50 ; 0x32\n",
- " 63a: 2334 movs r3, #52 ; 0x34\n",
- " 63c: 2336 movs r3, #54 ; 0x36\n",
- " 63e: 2338 movs r3, #56 ; 0x38\n",
- " 640: 233a movs r3, #58 ; 0x3a\n",
- " 642: 233c movs r3, #60 ; 0x3c\n",
- " 644: 233e movs r3, #62 ; 0x3e\n",
- " 646: 2340 movs r3, #64 ; 0x40\n",
- " 648: 2342 movs r3, #66 ; 0x42\n",
- " 64a: 2344 movs r3, #68 ; 0x44\n",
- " 64c: 2346 movs r3, #70 ; 0x46\n",
- " 64e: 2348 movs r3, #72 ; 0x48\n",
- " 650: 234a movs r3, #74 ; 0x4a\n",
- " 652: 234c movs r3, #76 ; 0x4c\n",
- " 654: 234e movs r3, #78 ; 0x4e\n",
- " 656: 2350 movs r3, #80 ; 0x50\n",
- " 658: 2352 movs r3, #82 ; 0x52\n",
- " 65a: 2354 movs r3, #84 ; 0x54\n",
- " 65c: 2356 movs r3, #86 ; 0x56\n",
- " 65e: 2358 movs r3, #88 ; 0x58\n",
- " 660: 235a movs r3, #90 ; 0x5a\n",
- " 662: 235c movs r3, #92 ; 0x5c\n",
- " 664: 235e movs r3, #94 ; 0x5e\n",
- " 666: 2360 movs r3, #96 ; 0x60\n",
- " 668: 2362 movs r3, #98 ; 0x62\n",
- " 66a: 2364 movs r3, #100 ; 0x64\n",
- " 66c: 2366 movs r3, #102 ; 0x66\n",
- " 66e: 2368 movs r3, #104 ; 0x68\n",
- " 670: 236a movs r3, #106 ; 0x6a\n",
- " 672: 236c movs r3, #108 ; 0x6c\n",
- " 674: 236e movs r3, #110 ; 0x6e\n",
- " 676: 2370 movs r3, #112 ; 0x70\n",
- " 678: 2372 movs r3, #114 ; 0x72\n",
- " 67a: 2374 movs r3, #116 ; 0x74\n",
- " 67c: 2376 movs r3, #118 ; 0x76\n",
- " 67e: 2378 movs r3, #120 ; 0x78\n",
- " 680: 237a movs r3, #122 ; 0x7a\n",
- " 682: 237c movs r3, #124 ; 0x7c\n",
- " 684: 237e movs r3, #126 ; 0x7e\n",
- " 686: 2380 movs r3, #128 ; 0x80\n",
- " 688: 2382 movs r3, #130 ; 0x82\n",
- " 68a: 2384 movs r3, #132 ; 0x84\n",
- " 68c: 2386 movs r3, #134 ; 0x86\n",
- " 68e: 2388 movs r3, #136 ; 0x88\n",
- " 690: 238a movs r3, #138 ; 0x8a\n",
- " 692: 238c movs r3, #140 ; 0x8c\n",
- " 694: 238e movs r3, #142 ; 0x8e\n",
- " 696: 2390 movs r3, #144 ; 0x90\n",
- " 698: 2392 movs r3, #146 ; 0x92\n",
- " 69a: 2394 movs r3, #148 ; 0x94\n",
- " 69c: 2396 movs r3, #150 ; 0x96\n",
- " 69e: 2398 movs r3, #152 ; 0x98\n",
- " 6a0: 239a movs r3, #154 ; 0x9a\n",
- " 6a2: 239c movs r3, #156 ; 0x9c\n",
- " 6a4: 239e movs r3, #158 ; 0x9e\n",
- " 6a6: 23a0 movs r3, #160 ; 0xa0\n",
- " 6a8: 23a2 movs r3, #162 ; 0xa2\n",
- " 6aa: 23a4 movs r3, #164 ; 0xa4\n",
- " 6ac: 23a6 movs r3, #166 ; 0xa6\n",
- " 6ae: 23a8 movs r3, #168 ; 0xa8\n",
- " 6b0: 23aa movs r3, #170 ; 0xaa\n",
- " 6b2: 23ac movs r3, #172 ; 0xac\n",
- " 6b4: 23ae movs r3, #174 ; 0xae\n",
- " 6b6: 23b0 movs r3, #176 ; 0xb0\n",
- " 6b8: 23b2 movs r3, #178 ; 0xb2\n",
- " 6ba: 23b4 movs r3, #180 ; 0xb4\n",
- " 6bc: 23b6 movs r3, #182 ; 0xb6\n",
- " 6be: 23b8 movs r3, #184 ; 0xb8\n",
- " 6c0: 23ba movs r3, #186 ; 0xba\n",
- " 6c2: 23bc movs r3, #188 ; 0xbc\n",
- " 6c4: 23be movs r3, #190 ; 0xbe\n",
- " 6c6: 23c0 movs r3, #192 ; 0xc0\n",
- " 6c8: 23c2 movs r3, #194 ; 0xc2\n",
- " 6ca: 23c4 movs r3, #196 ; 0xc4\n",
- " 6cc: 23c6 movs r3, #198 ; 0xc6\n",
- " 6ce: 23c8 movs r3, #200 ; 0xc8\n",
- " 6d0: 23ca movs r3, #202 ; 0xca\n",
- " 6d2: 23cc movs r3, #204 ; 0xcc\n",
- " 6d4: 23ce movs r3, #206 ; 0xce\n",
- " 6d6: 23d0 movs r3, #208 ; 0xd0\n",
- " 6d8: 23d2 movs r3, #210 ; 0xd2\n",
- " 6da: 23d4 movs r3, #212 ; 0xd4\n",
- " 6dc: 23d6 movs r3, #214 ; 0xd6\n",
- " 6de: 23d8 movs r3, #216 ; 0xd8\n",
- " 6e0: 23da movs r3, #218 ; 0xda\n",
- " 6e2: 23dc movs r3, #220 ; 0xdc\n",
- " 6e4: 23de movs r3, #222 ; 0xde\n",
- " 6e6: 23e0 movs r3, #224 ; 0xe0\n",
- " 6e8: 23e2 movs r3, #226 ; 0xe2\n",
- " 6ea: 23e4 movs r3, #228 ; 0xe4\n",
- " 6ec: 23e6 movs r3, #230 ; 0xe6\n",
- " 6ee: 23e8 movs r3, #232 ; 0xe8\n",
- " 6f0: 23ea movs r3, #234 ; 0xea\n",
- " 6f2: 23ec movs r3, #236 ; 0xec\n",
- " 6f4: 23ee movs r3, #238 ; 0xee\n",
- " 6f6: 23f0 movs r3, #240 ; 0xf0\n",
- " 6f8: 23f2 movs r3, #242 ; 0xf2\n",
- " 6fa: 23f4 movs r3, #244 ; 0xf4\n",
- " 6fc: 23f6 movs r3, #246 ; 0xf6\n",
- " 6fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 700: 23fa movs r3, #250 ; 0xfa\n",
- " 702: 23fc movs r3, #252 ; 0xfc\n",
- " 704: 23fe movs r3, #254 ; 0xfe\n",
- " 706: 2300 movs r3, #0\n",
- " 708: 2302 movs r3, #2\n",
- " 70a: 2304 movs r3, #4\n",
- " 70c: 2306 movs r3, #6\n",
- " 70e: 2308 movs r3, #8\n",
- " 710: 230a movs r3, #10\n",
- " 712: 230c movs r3, #12\n",
- " 714: 230e movs r3, #14\n",
- " 716: 2310 movs r3, #16\n",
- " 718: 2312 movs r3, #18\n",
- " 71a: 2314 movs r3, #20\n",
- " 71c: 2316 movs r3, #22\n",
- " 71e: 2318 movs r3, #24\n",
- " 720: 231a movs r3, #26\n",
- " 722: 231c movs r3, #28\n",
- " 724: 231e movs r3, #30\n",
- " 726: 2320 movs r3, #32\n",
- " 728: 2322 movs r3, #34 ; 0x22\n",
- " 72a: 2324 movs r3, #36 ; 0x24\n",
- " 72c: 2326 movs r3, #38 ; 0x26\n",
- " 72e: 2328 movs r3, #40 ; 0x28\n",
- " 730: 232a movs r3, #42 ; 0x2a\n",
- " 732: 232c movs r3, #44 ; 0x2c\n",
- " 734: 232e movs r3, #46 ; 0x2e\n",
- " 736: 2330 movs r3, #48 ; 0x30\n",
- " 738: 2332 movs r3, #50 ; 0x32\n",
- " 73a: 2334 movs r3, #52 ; 0x34\n",
- " 73c: 2336 movs r3, #54 ; 0x36\n",
- " 73e: 2338 movs r3, #56 ; 0x38\n",
- " 740: 233a movs r3, #58 ; 0x3a\n",
- " 742: 233c movs r3, #60 ; 0x3c\n",
- " 744: 233e movs r3, #62 ; 0x3e\n",
- " 746: 2340 movs r3, #64 ; 0x40\n",
- " 748: 2342 movs r3, #66 ; 0x42\n",
- " 74a: 2344 movs r3, #68 ; 0x44\n",
- " 74c: 2346 movs r3, #70 ; 0x46\n",
- " 74e: 2348 movs r3, #72 ; 0x48\n",
- " 750: 234a movs r3, #74 ; 0x4a\n",
- " 752: 234c movs r3, #76 ; 0x4c\n",
- " 754: 234e movs r3, #78 ; 0x4e\n",
- " 756: 2350 movs r3, #80 ; 0x50\n",
- " 758: 2352 movs r3, #82 ; 0x52\n",
- " 75a: 2354 movs r3, #84 ; 0x54\n",
- " 75c: 2356 movs r3, #86 ; 0x56\n",
- " 75e: 2358 movs r3, #88 ; 0x58\n",
- " 760: 235a movs r3, #90 ; 0x5a\n",
- " 762: 235c movs r3, #92 ; 0x5c\n",
- " 764: 235e movs r3, #94 ; 0x5e\n",
- " 766: 2360 movs r3, #96 ; 0x60\n",
- " 768: 2362 movs r3, #98 ; 0x62\n",
- " 76a: 2364 movs r3, #100 ; 0x64\n",
- " 76c: 2366 movs r3, #102 ; 0x66\n",
- " 76e: 2368 movs r3, #104 ; 0x68\n",
- " 770: 236a movs r3, #106 ; 0x6a\n",
- " 772: 236c movs r3, #108 ; 0x6c\n",
- " 774: 236e movs r3, #110 ; 0x6e\n",
- " 776: 2370 movs r3, #112 ; 0x70\n",
- " 778: 2372 movs r3, #114 ; 0x72\n",
- " 77a: 2374 movs r3, #116 ; 0x74\n",
- " 77c: 2376 movs r3, #118 ; 0x76\n",
- " 77e: 2378 movs r3, #120 ; 0x78\n",
- " 780: 237a movs r3, #122 ; 0x7a\n",
- " 782: 237c movs r3, #124 ; 0x7c\n",
- " 784: 237e movs r3, #126 ; 0x7e\n",
- " 786: 2380 movs r3, #128 ; 0x80\n",
- " 788: 2382 movs r3, #130 ; 0x82\n",
- " 78a: 2384 movs r3, #132 ; 0x84\n",
- " 78c: 2386 movs r3, #134 ; 0x86\n",
- " 78e: 2388 movs r3, #136 ; 0x88\n",
- " 790: 238a movs r3, #138 ; 0x8a\n",
- " 792: 238c movs r3, #140 ; 0x8c\n",
- " 794: 238e movs r3, #142 ; 0x8e\n",
- " 796: 2390 movs r3, #144 ; 0x90\n",
- " 798: 2392 movs r3, #146 ; 0x92\n",
- " 79a: 2394 movs r3, #148 ; 0x94\n",
- " 79c: 2396 movs r3, #150 ; 0x96\n",
- " 79e: 2398 movs r3, #152 ; 0x98\n",
- " 7a0: 239a movs r3, #154 ; 0x9a\n",
- " 7a2: 239c movs r3, #156 ; 0x9c\n",
- " 7a4: 239e movs r3, #158 ; 0x9e\n",
- " 7a6: 23a0 movs r3, #160 ; 0xa0\n",
- " 7a8: 23a2 movs r3, #162 ; 0xa2\n",
- " 7aa: 23a4 movs r3, #164 ; 0xa4\n",
- " 7ac: 23a6 movs r3, #166 ; 0xa6\n",
- " 7ae: 23a8 movs r3, #168 ; 0xa8\n",
- " 7b0: 23aa movs r3, #170 ; 0xaa\n",
- " 7b2: 23ac movs r3, #172 ; 0xac\n",
- " 7b4: 23ae movs r3, #174 ; 0xae\n",
- " 7b6: 23b0 movs r3, #176 ; 0xb0\n",
- " 7b8: 23b2 movs r3, #178 ; 0xb2\n",
- " 7ba: 23b4 movs r3, #180 ; 0xb4\n",
- " 7bc: 23b6 movs r3, #182 ; 0xb6\n",
- " 7be: 23b8 movs r3, #184 ; 0xb8\n",
- " 7c0: 23ba movs r3, #186 ; 0xba\n",
- " 7c2: 23bc movs r3, #188 ; 0xbc\n",
- " 7c4: 23be movs r3, #190 ; 0xbe\n",
- " 7c6: 23c0 movs r3, #192 ; 0xc0\n",
- " 7c8: 23c2 movs r3, #194 ; 0xc2\n",
- " 7ca: 23c4 movs r3, #196 ; 0xc4\n",
- " 7cc: 23c6 movs r3, #198 ; 0xc6\n",
- " 7ce: 23c8 movs r3, #200 ; 0xc8\n",
- " 7d0: 23ca movs r3, #202 ; 0xca\n",
- " 7d2: 23cc movs r3, #204 ; 0xcc\n",
- " 7d4: 23ce movs r3, #206 ; 0xce\n",
- " 7d6: 23d0 movs r3, #208 ; 0xd0\n",
- " 7d8: 23d2 movs r3, #210 ; 0xd2\n",
- " 7da: 23d4 movs r3, #212 ; 0xd4\n",
- " 7dc: 23d6 movs r3, #214 ; 0xd6\n",
- " 7de: 23d8 movs r3, #216 ; 0xd8\n",
- " 7e0: 23da movs r3, #218 ; 0xda\n",
- " 7e2: 23dc movs r3, #220 ; 0xdc\n",
- " 7e4: 23de movs r3, #222 ; 0xde\n",
- " 7e6: 23e0 movs r3, #224 ; 0xe0\n",
- " 7e8: 23e2 movs r3, #226 ; 0xe2\n",
- " 7ea: 23e4 movs r3, #228 ; 0xe4\n",
- " 7ec: 23e6 movs r3, #230 ; 0xe6\n",
- " 7ee: 23e8 movs r3, #232 ; 0xe8\n",
- " 7f0: 23ea movs r3, #234 ; 0xea\n",
- " 7f2: 23ec movs r3, #236 ; 0xec\n",
- " 7f4: 23ee movs r3, #238 ; 0xee\n",
- " 7f6: 23f0 movs r3, #240 ; 0xf0\n",
- " 7f8: 23f2 movs r3, #242 ; 0xf2\n",
- " 7fa: 23f4 movs r3, #244 ; 0xf4\n",
- " 7fc: 23f6 movs r3, #246 ; 0xf6\n",
- " 7fe: 23f8 movs r3, #248 ; 0xf8\n",
- " 800: 23fa movs r3, #250 ; 0xfa\n",
- " 802: 23fc movs r3, #252 ; 0xfc\n",
- " 804: 23fe movs r3, #254 ; 0xfe\n",
- " 806: 2300 movs r3, #0\n",
- " 808: 4611 mov r1, r2\n",
- nullptr
-};
-const char* const MixedBranch32Results[] = {
- " 0: f000 bc03 b.w 80a <MixedBranch32+0x80a>\n",
- " 4: 2300 movs r3, #0\n",
- " 6: 2302 movs r3, #2\n",
- " 8: 2304 movs r3, #4\n",
- " a: 2306 movs r3, #6\n",
- " c: 2308 movs r3, #8\n",
- " e: 230a movs r3, #10\n",
- " 10: 230c movs r3, #12\n",
- " 12: 230e movs r3, #14\n",
- " 14: 2310 movs r3, #16\n",
- " 16: 2312 movs r3, #18\n",
- " 18: 2314 movs r3, #20\n",
- " 1a: 2316 movs r3, #22\n",
- " 1c: 2318 movs r3, #24\n",
- " 1e: 231a movs r3, #26\n",
- " 20: 231c movs r3, #28\n",
- " 22: 231e movs r3, #30\n",
- " 24: 2320 movs r3, #32\n",
- " 26: 2322 movs r3, #34 ; 0x22\n",
- " 28: 2324 movs r3, #36 ; 0x24\n",
- " 2a: 2326 movs r3, #38 ; 0x26\n",
- " 2c: 2328 movs r3, #40 ; 0x28\n",
- " 2e: 232a movs r3, #42 ; 0x2a\n",
- " 30: 232c movs r3, #44 ; 0x2c\n",
- " 32: 232e movs r3, #46 ; 0x2e\n",
- " 34: 2330 movs r3, #48 ; 0x30\n",
- " 36: 2332 movs r3, #50 ; 0x32\n",
- " 38: 2334 movs r3, #52 ; 0x34\n",
- " 3a: 2336 movs r3, #54 ; 0x36\n",
- " 3c: 2338 movs r3, #56 ; 0x38\n",
- " 3e: 233a movs r3, #58 ; 0x3a\n",
- " 40: 233c movs r3, #60 ; 0x3c\n",
- " 42: 233e movs r3, #62 ; 0x3e\n",
- " 44: 2340 movs r3, #64 ; 0x40\n",
- " 46: 2342 movs r3, #66 ; 0x42\n",
- " 48: 2344 movs r3, #68 ; 0x44\n",
- " 4a: 2346 movs r3, #70 ; 0x46\n",
- " 4c: 2348 movs r3, #72 ; 0x48\n",
- " 4e: 234a movs r3, #74 ; 0x4a\n",
- " 50: 234c movs r3, #76 ; 0x4c\n",
- " 52: 234e movs r3, #78 ; 0x4e\n",
- " 54: 2350 movs r3, #80 ; 0x50\n",
- " 56: 2352 movs r3, #82 ; 0x52\n",
- " 58: 2354 movs r3, #84 ; 0x54\n",
- " 5a: 2356 movs r3, #86 ; 0x56\n",
- " 5c: 2358 movs r3, #88 ; 0x58\n",
- " 5e: 235a movs r3, #90 ; 0x5a\n",
- " 60: 235c movs r3, #92 ; 0x5c\n",
- " 62: 235e movs r3, #94 ; 0x5e\n",
- " 64: 2360 movs r3, #96 ; 0x60\n",
- " 66: 2362 movs r3, #98 ; 0x62\n",
- " 68: 2364 movs r3, #100 ; 0x64\n",
- " 6a: 2366 movs r3, #102 ; 0x66\n",
- " 6c: 2368 movs r3, #104 ; 0x68\n",
- " 6e: 236a movs r3, #106 ; 0x6a\n",
- " 70: 236c movs r3, #108 ; 0x6c\n",
- " 72: 236e movs r3, #110 ; 0x6e\n",
- " 74: 2370 movs r3, #112 ; 0x70\n",
- " 76: 2372 movs r3, #114 ; 0x72\n",
- " 78: 2374 movs r3, #116 ; 0x74\n",
- " 7a: 2376 movs r3, #118 ; 0x76\n",
- " 7c: 2378 movs r3, #120 ; 0x78\n",
- " 7e: 237a movs r3, #122 ; 0x7a\n",
- " 80: 237c movs r3, #124 ; 0x7c\n",
- " 82: 237e movs r3, #126 ; 0x7e\n",
- " 84: 2380 movs r3, #128 ; 0x80\n",
- " 86: 2382 movs r3, #130 ; 0x82\n",
- " 88: 2384 movs r3, #132 ; 0x84\n",
- " 8a: 2386 movs r3, #134 ; 0x86\n",
- " 8c: 2388 movs r3, #136 ; 0x88\n",
- " 8e: 238a movs r3, #138 ; 0x8a\n",
- " 90: 238c movs r3, #140 ; 0x8c\n",
- " 92: 238e movs r3, #142 ; 0x8e\n",
- " 94: 2390 movs r3, #144 ; 0x90\n",
- " 96: 2392 movs r3, #146 ; 0x92\n",
- " 98: 2394 movs r3, #148 ; 0x94\n",
- " 9a: 2396 movs r3, #150 ; 0x96\n",
- " 9c: 2398 movs r3, #152 ; 0x98\n",
- " 9e: 239a movs r3, #154 ; 0x9a\n",
- " a0: 239c movs r3, #156 ; 0x9c\n",
- " a2: 239e movs r3, #158 ; 0x9e\n",
- " a4: 23a0 movs r3, #160 ; 0xa0\n",
- " a6: 23a2 movs r3, #162 ; 0xa2\n",
- " a8: 23a4 movs r3, #164 ; 0xa4\n",
- " aa: 23a6 movs r3, #166 ; 0xa6\n",
- " ac: 23a8 movs r3, #168 ; 0xa8\n",
- " ae: 23aa movs r3, #170 ; 0xaa\n",
- " b0: 23ac movs r3, #172 ; 0xac\n",
- " b2: 23ae movs r3, #174 ; 0xae\n",
- " b4: 23b0 movs r3, #176 ; 0xb0\n",
- " b6: 23b2 movs r3, #178 ; 0xb2\n",
- " b8: 23b4 movs r3, #180 ; 0xb4\n",
- " ba: 23b6 movs r3, #182 ; 0xb6\n",
- " bc: 23b8 movs r3, #184 ; 0xb8\n",
- " be: 23ba movs r3, #186 ; 0xba\n",
- " c0: 23bc movs r3, #188 ; 0xbc\n",
- " c2: 23be movs r3, #190 ; 0xbe\n",
- " c4: 23c0 movs r3, #192 ; 0xc0\n",
- " c6: 23c2 movs r3, #194 ; 0xc2\n",
- " c8: 23c4 movs r3, #196 ; 0xc4\n",
- " ca: 23c6 movs r3, #198 ; 0xc6\n",
- " cc: 23c8 movs r3, #200 ; 0xc8\n",
- " ce: 23ca movs r3, #202 ; 0xca\n",
- " d0: 23cc movs r3, #204 ; 0xcc\n",
- " d2: 23ce movs r3, #206 ; 0xce\n",
- " d4: 23d0 movs r3, #208 ; 0xd0\n",
- " d6: 23d2 movs r3, #210 ; 0xd2\n",
- " d8: 23d4 movs r3, #212 ; 0xd4\n",
- " da: 23d6 movs r3, #214 ; 0xd6\n",
- " dc: 23d8 movs r3, #216 ; 0xd8\n",
- " de: 23da movs r3, #218 ; 0xda\n",
- " e0: 23dc movs r3, #220 ; 0xdc\n",
- " e2: 23de movs r3, #222 ; 0xde\n",
- " e4: 23e0 movs r3, #224 ; 0xe0\n",
- " e6: 23e2 movs r3, #226 ; 0xe2\n",
- " e8: 23e4 movs r3, #228 ; 0xe4\n",
- " ea: 23e6 movs r3, #230 ; 0xe6\n",
- " ec: 23e8 movs r3, #232 ; 0xe8\n",
- " ee: 23ea movs r3, #234 ; 0xea\n",
- " f0: 23ec movs r3, #236 ; 0xec\n",
- " f2: 23ee movs r3, #238 ; 0xee\n",
- " f4: 23f0 movs r3, #240 ; 0xf0\n",
- " f6: 23f2 movs r3, #242 ; 0xf2\n",
- " f8: 23f4 movs r3, #244 ; 0xf4\n",
- " fa: 23f6 movs r3, #246 ; 0xf6\n",
- " fc: 23f8 movs r3, #248 ; 0xf8\n",
- " fe: 23fa movs r3, #250 ; 0xfa\n",
- " 100: 23fc movs r3, #252 ; 0xfc\n",
- " 102: 23fe movs r3, #254 ; 0xfe\n",
- " 104: 2300 movs r3, #0\n",
- " 106: 2302 movs r3, #2\n",
- " 108: 2304 movs r3, #4\n",
- " 10a: 2306 movs r3, #6\n",
- " 10c: 2308 movs r3, #8\n",
- " 10e: 230a movs r3, #10\n",
- " 110: 230c movs r3, #12\n",
- " 112: 230e movs r3, #14\n",
- " 114: 2310 movs r3, #16\n",
- " 116: 2312 movs r3, #18\n",
- " 118: 2314 movs r3, #20\n",
- " 11a: 2316 movs r3, #22\n",
- " 11c: 2318 movs r3, #24\n",
- " 11e: 231a movs r3, #26\n",
- " 120: 231c movs r3, #28\n",
- " 122: 231e movs r3, #30\n",
- " 124: 2320 movs r3, #32\n",
- " 126: 2322 movs r3, #34 ; 0x22\n",
- " 128: 2324 movs r3, #36 ; 0x24\n",
- " 12a: 2326 movs r3, #38 ; 0x26\n",
- " 12c: 2328 movs r3, #40 ; 0x28\n",
- " 12e: 232a movs r3, #42 ; 0x2a\n",
- " 130: 232c movs r3, #44 ; 0x2c\n",
- " 132: 232e movs r3, #46 ; 0x2e\n",
- " 134: 2330 movs r3, #48 ; 0x30\n",
- " 136: 2332 movs r3, #50 ; 0x32\n",
- " 138: 2334 movs r3, #52 ; 0x34\n",
- " 13a: 2336 movs r3, #54 ; 0x36\n",
- " 13c: 2338 movs r3, #56 ; 0x38\n",
- " 13e: 233a movs r3, #58 ; 0x3a\n",
- " 140: 233c movs r3, #60 ; 0x3c\n",
- " 142: 233e movs r3, #62 ; 0x3e\n",
- " 144: 2340 movs r3, #64 ; 0x40\n",
- " 146: 2342 movs r3, #66 ; 0x42\n",
- " 148: 2344 movs r3, #68 ; 0x44\n",
- " 14a: 2346 movs r3, #70 ; 0x46\n",
- " 14c: 2348 movs r3, #72 ; 0x48\n",
- " 14e: 234a movs r3, #74 ; 0x4a\n",
- " 150: 234c movs r3, #76 ; 0x4c\n",
- " 152: 234e movs r3, #78 ; 0x4e\n",
- " 154: 2350 movs r3, #80 ; 0x50\n",
- " 156: 2352 movs r3, #82 ; 0x52\n",
- " 158: 2354 movs r3, #84 ; 0x54\n",
- " 15a: 2356 movs r3, #86 ; 0x56\n",
- " 15c: 2358 movs r3, #88 ; 0x58\n",
- " 15e: 235a movs r3, #90 ; 0x5a\n",
- " 160: 235c movs r3, #92 ; 0x5c\n",
- " 162: 235e movs r3, #94 ; 0x5e\n",
- " 164: 2360 movs r3, #96 ; 0x60\n",
- " 166: 2362 movs r3, #98 ; 0x62\n",
- " 168: 2364 movs r3, #100 ; 0x64\n",
- " 16a: 2366 movs r3, #102 ; 0x66\n",
- " 16c: 2368 movs r3, #104 ; 0x68\n",
- " 16e: 236a movs r3, #106 ; 0x6a\n",
- " 170: 236c movs r3, #108 ; 0x6c\n",
- " 172: 236e movs r3, #110 ; 0x6e\n",
- " 174: 2370 movs r3, #112 ; 0x70\n",
- " 176: 2372 movs r3, #114 ; 0x72\n",
- " 178: 2374 movs r3, #116 ; 0x74\n",
- " 17a: 2376 movs r3, #118 ; 0x76\n",
- " 17c: 2378 movs r3, #120 ; 0x78\n",
- " 17e: 237a movs r3, #122 ; 0x7a\n",
- " 180: 237c movs r3, #124 ; 0x7c\n",
- " 182: 237e movs r3, #126 ; 0x7e\n",
- " 184: 2380 movs r3, #128 ; 0x80\n",
- " 186: 2382 movs r3, #130 ; 0x82\n",
- " 188: 2384 movs r3, #132 ; 0x84\n",
- " 18a: 2386 movs r3, #134 ; 0x86\n",
- " 18c: 2388 movs r3, #136 ; 0x88\n",
- " 18e: 238a movs r3, #138 ; 0x8a\n",
- " 190: 238c movs r3, #140 ; 0x8c\n",
- " 192: 238e movs r3, #142 ; 0x8e\n",
- " 194: 2390 movs r3, #144 ; 0x90\n",
- " 196: 2392 movs r3, #146 ; 0x92\n",
- " 198: 2394 movs r3, #148 ; 0x94\n",
- " 19a: 2396 movs r3, #150 ; 0x96\n",
- " 19c: 2398 movs r3, #152 ; 0x98\n",
- " 19e: 239a movs r3, #154 ; 0x9a\n",
- " 1a0: 239c movs r3, #156 ; 0x9c\n",
- " 1a2: 239e movs r3, #158 ; 0x9e\n",
- " 1a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 1a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 1a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 1aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 1ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 1ae: 23aa movs r3, #170 ; 0xaa\n",
- " 1b0: 23ac movs r3, #172 ; 0xac\n",
- " 1b2: 23ae movs r3, #174 ; 0xae\n",
- " 1b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 1b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 1b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 1ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 1bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 1be: 23ba movs r3, #186 ; 0xba\n",
- " 1c0: 23bc movs r3, #188 ; 0xbc\n",
- " 1c2: 23be movs r3, #190 ; 0xbe\n",
- " 1c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 1c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 1c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 1ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 1cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 1ce: 23ca movs r3, #202 ; 0xca\n",
- " 1d0: 23cc movs r3, #204 ; 0xcc\n",
- " 1d2: 23ce movs r3, #206 ; 0xce\n",
- " 1d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 1d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 1d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 1da: 23d6 movs r3, #214 ; 0xd6\n",
- " 1dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 1de: 23da movs r3, #218 ; 0xda\n",
- " 1e0: 23dc movs r3, #220 ; 0xdc\n",
- " 1e2: 23de movs r3, #222 ; 0xde\n",
- " 1e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 1e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 1e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 1ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 1ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 1ee: 23ea movs r3, #234 ; 0xea\n",
- " 1f0: 23ec movs r3, #236 ; 0xec\n",
- " 1f2: 23ee movs r3, #238 ; 0xee\n",
- " 1f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 1f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 1f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 1fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 1fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 1fe: 23fa movs r3, #250 ; 0xfa\n",
- " 200: 23fc movs r3, #252 ; 0xfc\n",
- " 202: 23fe movs r3, #254 ; 0xfe\n",
- " 204: 2300 movs r3, #0\n",
- " 206: 2302 movs r3, #2\n",
- " 208: 2304 movs r3, #4\n",
- " 20a: 2306 movs r3, #6\n",
- " 20c: 2308 movs r3, #8\n",
- " 20e: 230a movs r3, #10\n",
- " 210: 230c movs r3, #12\n",
- " 212: 230e movs r3, #14\n",
- " 214: 2310 movs r3, #16\n",
- " 216: 2312 movs r3, #18\n",
- " 218: 2314 movs r3, #20\n",
- " 21a: 2316 movs r3, #22\n",
- " 21c: 2318 movs r3, #24\n",
- " 21e: 231a movs r3, #26\n",
- " 220: 231c movs r3, #28\n",
- " 222: 231e movs r3, #30\n",
- " 224: 2320 movs r3, #32\n",
- " 226: 2322 movs r3, #34 ; 0x22\n",
- " 228: 2324 movs r3, #36 ; 0x24\n",
- " 22a: 2326 movs r3, #38 ; 0x26\n",
- " 22c: 2328 movs r3, #40 ; 0x28\n",
- " 22e: 232a movs r3, #42 ; 0x2a\n",
- " 230: 232c movs r3, #44 ; 0x2c\n",
- " 232: 232e movs r3, #46 ; 0x2e\n",
- " 234: 2330 movs r3, #48 ; 0x30\n",
- " 236: 2332 movs r3, #50 ; 0x32\n",
- " 238: 2334 movs r3, #52 ; 0x34\n",
- " 23a: 2336 movs r3, #54 ; 0x36\n",
- " 23c: 2338 movs r3, #56 ; 0x38\n",
- " 23e: 233a movs r3, #58 ; 0x3a\n",
- " 240: 233c movs r3, #60 ; 0x3c\n",
- " 242: 233e movs r3, #62 ; 0x3e\n",
- " 244: 2340 movs r3, #64 ; 0x40\n",
- " 246: 2342 movs r3, #66 ; 0x42\n",
- " 248: 2344 movs r3, #68 ; 0x44\n",
- " 24a: 2346 movs r3, #70 ; 0x46\n",
- " 24c: 2348 movs r3, #72 ; 0x48\n",
- " 24e: 234a movs r3, #74 ; 0x4a\n",
- " 250: 234c movs r3, #76 ; 0x4c\n",
- " 252: 234e movs r3, #78 ; 0x4e\n",
- " 254: 2350 movs r3, #80 ; 0x50\n",
- " 256: 2352 movs r3, #82 ; 0x52\n",
- " 258: 2354 movs r3, #84 ; 0x54\n",
- " 25a: 2356 movs r3, #86 ; 0x56\n",
- " 25c: 2358 movs r3, #88 ; 0x58\n",
- " 25e: 235a movs r3, #90 ; 0x5a\n",
- " 260: 235c movs r3, #92 ; 0x5c\n",
- " 262: 235e movs r3, #94 ; 0x5e\n",
- " 264: 2360 movs r3, #96 ; 0x60\n",
- " 266: 2362 movs r3, #98 ; 0x62\n",
- " 268: 2364 movs r3, #100 ; 0x64\n",
- " 26a: 2366 movs r3, #102 ; 0x66\n",
- " 26c: 2368 movs r3, #104 ; 0x68\n",
- " 26e: 236a movs r3, #106 ; 0x6a\n",
- " 270: 236c movs r3, #108 ; 0x6c\n",
- " 272: 236e movs r3, #110 ; 0x6e\n",
- " 274: 2370 movs r3, #112 ; 0x70\n",
- " 276: 2372 movs r3, #114 ; 0x72\n",
- " 278: 2374 movs r3, #116 ; 0x74\n",
- " 27a: 2376 movs r3, #118 ; 0x76\n",
- " 27c: 2378 movs r3, #120 ; 0x78\n",
- " 27e: 237a movs r3, #122 ; 0x7a\n",
- " 280: 237c movs r3, #124 ; 0x7c\n",
- " 282: 237e movs r3, #126 ; 0x7e\n",
- " 284: 2380 movs r3, #128 ; 0x80\n",
- " 286: 2382 movs r3, #130 ; 0x82\n",
- " 288: 2384 movs r3, #132 ; 0x84\n",
- " 28a: 2386 movs r3, #134 ; 0x86\n",
- " 28c: 2388 movs r3, #136 ; 0x88\n",
- " 28e: 238a movs r3, #138 ; 0x8a\n",
- " 290: 238c movs r3, #140 ; 0x8c\n",
- " 292: 238e movs r3, #142 ; 0x8e\n",
- " 294: 2390 movs r3, #144 ; 0x90\n",
- " 296: 2392 movs r3, #146 ; 0x92\n",
- " 298: 2394 movs r3, #148 ; 0x94\n",
- " 29a: 2396 movs r3, #150 ; 0x96\n",
- " 29c: 2398 movs r3, #152 ; 0x98\n",
- " 29e: 239a movs r3, #154 ; 0x9a\n",
- " 2a0: 239c movs r3, #156 ; 0x9c\n",
- " 2a2: 239e movs r3, #158 ; 0x9e\n",
- " 2a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 2a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 2a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 2aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 2ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 2ae: 23aa movs r3, #170 ; 0xaa\n",
- " 2b0: 23ac movs r3, #172 ; 0xac\n",
- " 2b2: 23ae movs r3, #174 ; 0xae\n",
- " 2b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 2b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 2b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 2ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 2bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 2be: 23ba movs r3, #186 ; 0xba\n",
- " 2c0: 23bc movs r3, #188 ; 0xbc\n",
- " 2c2: 23be movs r3, #190 ; 0xbe\n",
- " 2c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 2c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 2c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 2ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 2cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 2ce: 23ca movs r3, #202 ; 0xca\n",
- " 2d0: 23cc movs r3, #204 ; 0xcc\n",
- " 2d2: 23ce movs r3, #206 ; 0xce\n",
- " 2d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 2d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 2d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 2da: 23d6 movs r3, #214 ; 0xd6\n",
- " 2dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 2de: 23da movs r3, #218 ; 0xda\n",
- " 2e0: 23dc movs r3, #220 ; 0xdc\n",
- " 2e2: 23de movs r3, #222 ; 0xde\n",
- " 2e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 2e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 2e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 2ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 2ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 2ee: 23ea movs r3, #234 ; 0xea\n",
- " 2f0: 23ec movs r3, #236 ; 0xec\n",
- " 2f2: 23ee movs r3, #238 ; 0xee\n",
- " 2f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 2f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 2f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 2fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 2fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 2fe: 23fa movs r3, #250 ; 0xfa\n",
- " 300: 23fc movs r3, #252 ; 0xfc\n",
- " 302: 23fe movs r3, #254 ; 0xfe\n",
- " 304: 2300 movs r3, #0\n",
- " 306: 2302 movs r3, #2\n",
- " 308: 2304 movs r3, #4\n",
- " 30a: 2306 movs r3, #6\n",
- " 30c: 2308 movs r3, #8\n",
- " 30e: 230a movs r3, #10\n",
- " 310: 230c movs r3, #12\n",
- " 312: 230e movs r3, #14\n",
- " 314: 2310 movs r3, #16\n",
- " 316: 2312 movs r3, #18\n",
- " 318: 2314 movs r3, #20\n",
- " 31a: 2316 movs r3, #22\n",
- " 31c: 2318 movs r3, #24\n",
- " 31e: 231a movs r3, #26\n",
- " 320: 231c movs r3, #28\n",
- " 322: 231e movs r3, #30\n",
- " 324: 2320 movs r3, #32\n",
- " 326: 2322 movs r3, #34 ; 0x22\n",
- " 328: 2324 movs r3, #36 ; 0x24\n",
- " 32a: 2326 movs r3, #38 ; 0x26\n",
- " 32c: 2328 movs r3, #40 ; 0x28\n",
- " 32e: 232a movs r3, #42 ; 0x2a\n",
- " 330: 232c movs r3, #44 ; 0x2c\n",
- " 332: 232e movs r3, #46 ; 0x2e\n",
- " 334: 2330 movs r3, #48 ; 0x30\n",
- " 336: 2332 movs r3, #50 ; 0x32\n",
- " 338: 2334 movs r3, #52 ; 0x34\n",
- " 33a: 2336 movs r3, #54 ; 0x36\n",
- " 33c: 2338 movs r3, #56 ; 0x38\n",
- " 33e: 233a movs r3, #58 ; 0x3a\n",
- " 340: 233c movs r3, #60 ; 0x3c\n",
- " 342: 233e movs r3, #62 ; 0x3e\n",
- " 344: 2340 movs r3, #64 ; 0x40\n",
- " 346: 2342 movs r3, #66 ; 0x42\n",
- " 348: 2344 movs r3, #68 ; 0x44\n",
- " 34a: 2346 movs r3, #70 ; 0x46\n",
- " 34c: 2348 movs r3, #72 ; 0x48\n",
- " 34e: 234a movs r3, #74 ; 0x4a\n",
- " 350: 234c movs r3, #76 ; 0x4c\n",
- " 352: 234e movs r3, #78 ; 0x4e\n",
- " 354: 2350 movs r3, #80 ; 0x50\n",
- " 356: 2352 movs r3, #82 ; 0x52\n",
- " 358: 2354 movs r3, #84 ; 0x54\n",
- " 35a: 2356 movs r3, #86 ; 0x56\n",
- " 35c: 2358 movs r3, #88 ; 0x58\n",
- " 35e: 235a movs r3, #90 ; 0x5a\n",
- " 360: 235c movs r3, #92 ; 0x5c\n",
- " 362: 235e movs r3, #94 ; 0x5e\n",
- " 364: 2360 movs r3, #96 ; 0x60\n",
- " 366: 2362 movs r3, #98 ; 0x62\n",
- " 368: 2364 movs r3, #100 ; 0x64\n",
- " 36a: 2366 movs r3, #102 ; 0x66\n",
- " 36c: 2368 movs r3, #104 ; 0x68\n",
- " 36e: 236a movs r3, #106 ; 0x6a\n",
- " 370: 236c movs r3, #108 ; 0x6c\n",
- " 372: 236e movs r3, #110 ; 0x6e\n",
- " 374: 2370 movs r3, #112 ; 0x70\n",
- " 376: 2372 movs r3, #114 ; 0x72\n",
- " 378: 2374 movs r3, #116 ; 0x74\n",
- " 37a: 2376 movs r3, #118 ; 0x76\n",
- " 37c: 2378 movs r3, #120 ; 0x78\n",
- " 37e: 237a movs r3, #122 ; 0x7a\n",
- " 380: 237c movs r3, #124 ; 0x7c\n",
- " 382: 237e movs r3, #126 ; 0x7e\n",
- " 384: 2380 movs r3, #128 ; 0x80\n",
- " 386: 2382 movs r3, #130 ; 0x82\n",
- " 388: 2384 movs r3, #132 ; 0x84\n",
- " 38a: 2386 movs r3, #134 ; 0x86\n",
- " 38c: 2388 movs r3, #136 ; 0x88\n",
- " 38e: 238a movs r3, #138 ; 0x8a\n",
- " 390: 238c movs r3, #140 ; 0x8c\n",
- " 392: 238e movs r3, #142 ; 0x8e\n",
- " 394: 2390 movs r3, #144 ; 0x90\n",
- " 396: 2392 movs r3, #146 ; 0x92\n",
- " 398: 2394 movs r3, #148 ; 0x94\n",
- " 39a: 2396 movs r3, #150 ; 0x96\n",
- " 39c: 2398 movs r3, #152 ; 0x98\n",
- " 39e: 239a movs r3, #154 ; 0x9a\n",
- " 3a0: 239c movs r3, #156 ; 0x9c\n",
- " 3a2: 239e movs r3, #158 ; 0x9e\n",
- " 3a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 3a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 3a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 3aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 3ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 3ae: 23aa movs r3, #170 ; 0xaa\n",
- " 3b0: 23ac movs r3, #172 ; 0xac\n",
- " 3b2: 23ae movs r3, #174 ; 0xae\n",
- " 3b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 3b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 3b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 3ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 3bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 3be: 23ba movs r3, #186 ; 0xba\n",
- " 3c0: 23bc movs r3, #188 ; 0xbc\n",
- " 3c2: 23be movs r3, #190 ; 0xbe\n",
- " 3c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 3c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 3c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 3ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 3cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 3ce: 23ca movs r3, #202 ; 0xca\n",
- " 3d0: 23cc movs r3, #204 ; 0xcc\n",
- " 3d2: 23ce movs r3, #206 ; 0xce\n",
- " 3d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 3d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 3d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 3da: 23d6 movs r3, #214 ; 0xd6\n",
- " 3dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 3de: 23da movs r3, #218 ; 0xda\n",
- " 3e0: 23dc movs r3, #220 ; 0xdc\n",
- " 3e2: 23de movs r3, #222 ; 0xde\n",
- " 3e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 3e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 3e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 3ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 3ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 3ee: 23ea movs r3, #234 ; 0xea\n",
- " 3f0: 23ec movs r3, #236 ; 0xec\n",
- " 3f2: 23ee movs r3, #238 ; 0xee\n",
- " 3f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 3f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 3f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 3fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 3fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 3fe: 23fa movs r3, #250 ; 0xfa\n",
- " 400: 23fc movs r3, #252 ; 0xfc\n",
- " 402: 23fe movs r3, #254 ; 0xfe\n",
- " 404: 2300 movs r3, #0\n",
- " 406: 2302 movs r3, #2\n",
- " 408: 2304 movs r3, #4\n",
- " 40a: 2306 movs r3, #6\n",
- " 40c: 2308 movs r3, #8\n",
- " 40e: 230a movs r3, #10\n",
- " 410: 230c movs r3, #12\n",
- " 412: 230e movs r3, #14\n",
- " 414: 2310 movs r3, #16\n",
- " 416: 2312 movs r3, #18\n",
- " 418: 2314 movs r3, #20\n",
- " 41a: 2316 movs r3, #22\n",
- " 41c: 2318 movs r3, #24\n",
- " 41e: 231a movs r3, #26\n",
- " 420: 231c movs r3, #28\n",
- " 422: 231e movs r3, #30\n",
- " 424: 2320 movs r3, #32\n",
- " 426: 2322 movs r3, #34 ; 0x22\n",
- " 428: 2324 movs r3, #36 ; 0x24\n",
- " 42a: 2326 movs r3, #38 ; 0x26\n",
- " 42c: 2328 movs r3, #40 ; 0x28\n",
- " 42e: 232a movs r3, #42 ; 0x2a\n",
- " 430: 232c movs r3, #44 ; 0x2c\n",
- " 432: 232e movs r3, #46 ; 0x2e\n",
- " 434: 2330 movs r3, #48 ; 0x30\n",
- " 436: 2332 movs r3, #50 ; 0x32\n",
- " 438: 2334 movs r3, #52 ; 0x34\n",
- " 43a: 2336 movs r3, #54 ; 0x36\n",
- " 43c: 2338 movs r3, #56 ; 0x38\n",
- " 43e: 233a movs r3, #58 ; 0x3a\n",
- " 440: 233c movs r3, #60 ; 0x3c\n",
- " 442: 233e movs r3, #62 ; 0x3e\n",
- " 444: 2340 movs r3, #64 ; 0x40\n",
- " 446: 2342 movs r3, #66 ; 0x42\n",
- " 448: 2344 movs r3, #68 ; 0x44\n",
- " 44a: 2346 movs r3, #70 ; 0x46\n",
- " 44c: 2348 movs r3, #72 ; 0x48\n",
- " 44e: 234a movs r3, #74 ; 0x4a\n",
- " 450: 234c movs r3, #76 ; 0x4c\n",
- " 452: 234e movs r3, #78 ; 0x4e\n",
- " 454: 2350 movs r3, #80 ; 0x50\n",
- " 456: 2352 movs r3, #82 ; 0x52\n",
- " 458: 2354 movs r3, #84 ; 0x54\n",
- " 45a: 2356 movs r3, #86 ; 0x56\n",
- " 45c: 2358 movs r3, #88 ; 0x58\n",
- " 45e: 235a movs r3, #90 ; 0x5a\n",
- " 460: 235c movs r3, #92 ; 0x5c\n",
- " 462: 235e movs r3, #94 ; 0x5e\n",
- " 464: 2360 movs r3, #96 ; 0x60\n",
- " 466: 2362 movs r3, #98 ; 0x62\n",
- " 468: 2364 movs r3, #100 ; 0x64\n",
- " 46a: 2366 movs r3, #102 ; 0x66\n",
- " 46c: 2368 movs r3, #104 ; 0x68\n",
- " 46e: 236a movs r3, #106 ; 0x6a\n",
- " 470: 236c movs r3, #108 ; 0x6c\n",
- " 472: 236e movs r3, #110 ; 0x6e\n",
- " 474: 2370 movs r3, #112 ; 0x70\n",
- " 476: 2372 movs r3, #114 ; 0x72\n",
- " 478: 2374 movs r3, #116 ; 0x74\n",
- " 47a: 2376 movs r3, #118 ; 0x76\n",
- " 47c: 2378 movs r3, #120 ; 0x78\n",
- " 47e: 237a movs r3, #122 ; 0x7a\n",
- " 480: 237c movs r3, #124 ; 0x7c\n",
- " 482: 237e movs r3, #126 ; 0x7e\n",
- " 484: 2380 movs r3, #128 ; 0x80\n",
- " 486: 2382 movs r3, #130 ; 0x82\n",
- " 488: 2384 movs r3, #132 ; 0x84\n",
- " 48a: 2386 movs r3, #134 ; 0x86\n",
- " 48c: 2388 movs r3, #136 ; 0x88\n",
- " 48e: 238a movs r3, #138 ; 0x8a\n",
- " 490: 238c movs r3, #140 ; 0x8c\n",
- " 492: 238e movs r3, #142 ; 0x8e\n",
- " 494: 2390 movs r3, #144 ; 0x90\n",
- " 496: 2392 movs r3, #146 ; 0x92\n",
- " 498: 2394 movs r3, #148 ; 0x94\n",
- " 49a: 2396 movs r3, #150 ; 0x96\n",
- " 49c: 2398 movs r3, #152 ; 0x98\n",
- " 49e: 239a movs r3, #154 ; 0x9a\n",
- " 4a0: 239c movs r3, #156 ; 0x9c\n",
- " 4a2: 239e movs r3, #158 ; 0x9e\n",
- " 4a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 4a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 4a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 4aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 4ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 4ae: 23aa movs r3, #170 ; 0xaa\n",
- " 4b0: 23ac movs r3, #172 ; 0xac\n",
- " 4b2: 23ae movs r3, #174 ; 0xae\n",
- " 4b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 4b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 4b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 4ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 4bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 4be: 23ba movs r3, #186 ; 0xba\n",
- " 4c0: 23bc movs r3, #188 ; 0xbc\n",
- " 4c2: 23be movs r3, #190 ; 0xbe\n",
- " 4c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 4c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 4c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 4ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 4cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 4ce: 23ca movs r3, #202 ; 0xca\n",
- " 4d0: 23cc movs r3, #204 ; 0xcc\n",
- " 4d2: 23ce movs r3, #206 ; 0xce\n",
- " 4d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 4d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 4d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 4da: 23d6 movs r3, #214 ; 0xd6\n",
- " 4dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 4de: 23da movs r3, #218 ; 0xda\n",
- " 4e0: 23dc movs r3, #220 ; 0xdc\n",
- " 4e2: 23de movs r3, #222 ; 0xde\n",
- " 4e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 4e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 4e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 4ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 4ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 4ee: 23ea movs r3, #234 ; 0xea\n",
- " 4f0: 23ec movs r3, #236 ; 0xec\n",
- " 4f2: 23ee movs r3, #238 ; 0xee\n",
- " 4f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 4f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 4f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 4fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 4fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 4fe: 23fa movs r3, #250 ; 0xfa\n",
- " 500: 23fc movs r3, #252 ; 0xfc\n",
- " 502: 23fe movs r3, #254 ; 0xfe\n",
- " 504: 2300 movs r3, #0\n",
- " 506: 2302 movs r3, #2\n",
- " 508: 2304 movs r3, #4\n",
- " 50a: 2306 movs r3, #6\n",
- " 50c: 2308 movs r3, #8\n",
- " 50e: 230a movs r3, #10\n",
- " 510: 230c movs r3, #12\n",
- " 512: 230e movs r3, #14\n",
- " 514: 2310 movs r3, #16\n",
- " 516: 2312 movs r3, #18\n",
- " 518: 2314 movs r3, #20\n",
- " 51a: 2316 movs r3, #22\n",
- " 51c: 2318 movs r3, #24\n",
- " 51e: 231a movs r3, #26\n",
- " 520: 231c movs r3, #28\n",
- " 522: 231e movs r3, #30\n",
- " 524: 2320 movs r3, #32\n",
- " 526: 2322 movs r3, #34 ; 0x22\n",
- " 528: 2324 movs r3, #36 ; 0x24\n",
- " 52a: 2326 movs r3, #38 ; 0x26\n",
- " 52c: 2328 movs r3, #40 ; 0x28\n",
- " 52e: 232a movs r3, #42 ; 0x2a\n",
- " 530: 232c movs r3, #44 ; 0x2c\n",
- " 532: 232e movs r3, #46 ; 0x2e\n",
- " 534: 2330 movs r3, #48 ; 0x30\n",
- " 536: 2332 movs r3, #50 ; 0x32\n",
- " 538: 2334 movs r3, #52 ; 0x34\n",
- " 53a: 2336 movs r3, #54 ; 0x36\n",
- " 53c: 2338 movs r3, #56 ; 0x38\n",
- " 53e: 233a movs r3, #58 ; 0x3a\n",
- " 540: 233c movs r3, #60 ; 0x3c\n",
- " 542: 233e movs r3, #62 ; 0x3e\n",
- " 544: 2340 movs r3, #64 ; 0x40\n",
- " 546: 2342 movs r3, #66 ; 0x42\n",
- " 548: 2344 movs r3, #68 ; 0x44\n",
- " 54a: 2346 movs r3, #70 ; 0x46\n",
- " 54c: 2348 movs r3, #72 ; 0x48\n",
- " 54e: 234a movs r3, #74 ; 0x4a\n",
- " 550: 234c movs r3, #76 ; 0x4c\n",
- " 552: 234e movs r3, #78 ; 0x4e\n",
- " 554: 2350 movs r3, #80 ; 0x50\n",
- " 556: 2352 movs r3, #82 ; 0x52\n",
- " 558: 2354 movs r3, #84 ; 0x54\n",
- " 55a: 2356 movs r3, #86 ; 0x56\n",
- " 55c: 2358 movs r3, #88 ; 0x58\n",
- " 55e: 235a movs r3, #90 ; 0x5a\n",
- " 560: 235c movs r3, #92 ; 0x5c\n",
- " 562: 235e movs r3, #94 ; 0x5e\n",
- " 564: 2360 movs r3, #96 ; 0x60\n",
- " 566: 2362 movs r3, #98 ; 0x62\n",
- " 568: 2364 movs r3, #100 ; 0x64\n",
- " 56a: 2366 movs r3, #102 ; 0x66\n",
- " 56c: 2368 movs r3, #104 ; 0x68\n",
- " 56e: 236a movs r3, #106 ; 0x6a\n",
- " 570: 236c movs r3, #108 ; 0x6c\n",
- " 572: 236e movs r3, #110 ; 0x6e\n",
- " 574: 2370 movs r3, #112 ; 0x70\n",
- " 576: 2372 movs r3, #114 ; 0x72\n",
- " 578: 2374 movs r3, #116 ; 0x74\n",
- " 57a: 2376 movs r3, #118 ; 0x76\n",
- " 57c: 2378 movs r3, #120 ; 0x78\n",
- " 57e: 237a movs r3, #122 ; 0x7a\n",
- " 580: 237c movs r3, #124 ; 0x7c\n",
- " 582: 237e movs r3, #126 ; 0x7e\n",
- " 584: 2380 movs r3, #128 ; 0x80\n",
- " 586: 2382 movs r3, #130 ; 0x82\n",
- " 588: 2384 movs r3, #132 ; 0x84\n",
- " 58a: 2386 movs r3, #134 ; 0x86\n",
- " 58c: 2388 movs r3, #136 ; 0x88\n",
- " 58e: 238a movs r3, #138 ; 0x8a\n",
- " 590: 238c movs r3, #140 ; 0x8c\n",
- " 592: 238e movs r3, #142 ; 0x8e\n",
- " 594: 2390 movs r3, #144 ; 0x90\n",
- " 596: 2392 movs r3, #146 ; 0x92\n",
- " 598: 2394 movs r3, #148 ; 0x94\n",
- " 59a: 2396 movs r3, #150 ; 0x96\n",
- " 59c: 2398 movs r3, #152 ; 0x98\n",
- " 59e: 239a movs r3, #154 ; 0x9a\n",
- " 5a0: 239c movs r3, #156 ; 0x9c\n",
- " 5a2: 239e movs r3, #158 ; 0x9e\n",
- " 5a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 5a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 5a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 5aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 5ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 5ae: 23aa movs r3, #170 ; 0xaa\n",
- " 5b0: 23ac movs r3, #172 ; 0xac\n",
- " 5b2: 23ae movs r3, #174 ; 0xae\n",
- " 5b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 5b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 5b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 5ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 5bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 5be: 23ba movs r3, #186 ; 0xba\n",
- " 5c0: 23bc movs r3, #188 ; 0xbc\n",
- " 5c2: 23be movs r3, #190 ; 0xbe\n",
- " 5c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 5c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 5c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 5ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 5cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 5ce: 23ca movs r3, #202 ; 0xca\n",
- " 5d0: 23cc movs r3, #204 ; 0xcc\n",
- " 5d2: 23ce movs r3, #206 ; 0xce\n",
- " 5d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 5d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 5d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 5da: 23d6 movs r3, #214 ; 0xd6\n",
- " 5dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 5de: 23da movs r3, #218 ; 0xda\n",
- " 5e0: 23dc movs r3, #220 ; 0xdc\n",
- " 5e2: 23de movs r3, #222 ; 0xde\n",
- " 5e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 5e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 5e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 5ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 5ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 5ee: 23ea movs r3, #234 ; 0xea\n",
- " 5f0: 23ec movs r3, #236 ; 0xec\n",
- " 5f2: 23ee movs r3, #238 ; 0xee\n",
- " 5f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 5f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 5f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 5fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 5fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 5fe: 23fa movs r3, #250 ; 0xfa\n",
- " 600: 23fc movs r3, #252 ; 0xfc\n",
- " 602: 23fe movs r3, #254 ; 0xfe\n",
- " 604: 2300 movs r3, #0\n",
- " 606: 2302 movs r3, #2\n",
- " 608: 2304 movs r3, #4\n",
- " 60a: 2306 movs r3, #6\n",
- " 60c: 2308 movs r3, #8\n",
- " 60e: 230a movs r3, #10\n",
- " 610: 230c movs r3, #12\n",
- " 612: 230e movs r3, #14\n",
- " 614: 2310 movs r3, #16\n",
- " 616: 2312 movs r3, #18\n",
- " 618: 2314 movs r3, #20\n",
- " 61a: 2316 movs r3, #22\n",
- " 61c: 2318 movs r3, #24\n",
- " 61e: 231a movs r3, #26\n",
- " 620: 231c movs r3, #28\n",
- " 622: 231e movs r3, #30\n",
- " 624: 2320 movs r3, #32\n",
- " 626: 2322 movs r3, #34 ; 0x22\n",
- " 628: 2324 movs r3, #36 ; 0x24\n",
- " 62a: 2326 movs r3, #38 ; 0x26\n",
- " 62c: 2328 movs r3, #40 ; 0x28\n",
- " 62e: 232a movs r3, #42 ; 0x2a\n",
- " 630: 232c movs r3, #44 ; 0x2c\n",
- " 632: 232e movs r3, #46 ; 0x2e\n",
- " 634: 2330 movs r3, #48 ; 0x30\n",
- " 636: 2332 movs r3, #50 ; 0x32\n",
- " 638: 2334 movs r3, #52 ; 0x34\n",
- " 63a: 2336 movs r3, #54 ; 0x36\n",
- " 63c: 2338 movs r3, #56 ; 0x38\n",
- " 63e: 233a movs r3, #58 ; 0x3a\n",
- " 640: 233c movs r3, #60 ; 0x3c\n",
- " 642: 233e movs r3, #62 ; 0x3e\n",
- " 644: 2340 movs r3, #64 ; 0x40\n",
- " 646: 2342 movs r3, #66 ; 0x42\n",
- " 648: 2344 movs r3, #68 ; 0x44\n",
- " 64a: 2346 movs r3, #70 ; 0x46\n",
- " 64c: 2348 movs r3, #72 ; 0x48\n",
- " 64e: 234a movs r3, #74 ; 0x4a\n",
- " 650: 234c movs r3, #76 ; 0x4c\n",
- " 652: 234e movs r3, #78 ; 0x4e\n",
- " 654: 2350 movs r3, #80 ; 0x50\n",
- " 656: 2352 movs r3, #82 ; 0x52\n",
- " 658: 2354 movs r3, #84 ; 0x54\n",
- " 65a: 2356 movs r3, #86 ; 0x56\n",
- " 65c: 2358 movs r3, #88 ; 0x58\n",
- " 65e: 235a movs r3, #90 ; 0x5a\n",
- " 660: 235c movs r3, #92 ; 0x5c\n",
- " 662: 235e movs r3, #94 ; 0x5e\n",
- " 664: 2360 movs r3, #96 ; 0x60\n",
- " 666: 2362 movs r3, #98 ; 0x62\n",
- " 668: 2364 movs r3, #100 ; 0x64\n",
- " 66a: 2366 movs r3, #102 ; 0x66\n",
- " 66c: 2368 movs r3, #104 ; 0x68\n",
- " 66e: 236a movs r3, #106 ; 0x6a\n",
- " 670: 236c movs r3, #108 ; 0x6c\n",
- " 672: 236e movs r3, #110 ; 0x6e\n",
- " 674: 2370 movs r3, #112 ; 0x70\n",
- " 676: 2372 movs r3, #114 ; 0x72\n",
- " 678: 2374 movs r3, #116 ; 0x74\n",
- " 67a: 2376 movs r3, #118 ; 0x76\n",
- " 67c: 2378 movs r3, #120 ; 0x78\n",
- " 67e: 237a movs r3, #122 ; 0x7a\n",
- " 680: 237c movs r3, #124 ; 0x7c\n",
- " 682: 237e movs r3, #126 ; 0x7e\n",
- " 684: 2380 movs r3, #128 ; 0x80\n",
- " 686: 2382 movs r3, #130 ; 0x82\n",
- " 688: 2384 movs r3, #132 ; 0x84\n",
- " 68a: 2386 movs r3, #134 ; 0x86\n",
- " 68c: 2388 movs r3, #136 ; 0x88\n",
- " 68e: 238a movs r3, #138 ; 0x8a\n",
- " 690: 238c movs r3, #140 ; 0x8c\n",
- " 692: 238e movs r3, #142 ; 0x8e\n",
- " 694: 2390 movs r3, #144 ; 0x90\n",
- " 696: 2392 movs r3, #146 ; 0x92\n",
- " 698: 2394 movs r3, #148 ; 0x94\n",
- " 69a: 2396 movs r3, #150 ; 0x96\n",
- " 69c: 2398 movs r3, #152 ; 0x98\n",
- " 69e: 239a movs r3, #154 ; 0x9a\n",
- " 6a0: 239c movs r3, #156 ; 0x9c\n",
- " 6a2: 239e movs r3, #158 ; 0x9e\n",
- " 6a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 6a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 6a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 6aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 6ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 6ae: 23aa movs r3, #170 ; 0xaa\n",
- " 6b0: 23ac movs r3, #172 ; 0xac\n",
- " 6b2: 23ae movs r3, #174 ; 0xae\n",
- " 6b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 6b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 6b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 6ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 6bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 6be: 23ba movs r3, #186 ; 0xba\n",
- " 6c0: 23bc movs r3, #188 ; 0xbc\n",
- " 6c2: 23be movs r3, #190 ; 0xbe\n",
- " 6c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 6c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 6c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 6ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 6cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 6ce: 23ca movs r3, #202 ; 0xca\n",
- " 6d0: 23cc movs r3, #204 ; 0xcc\n",
- " 6d2: 23ce movs r3, #206 ; 0xce\n",
- " 6d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 6d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 6d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 6da: 23d6 movs r3, #214 ; 0xd6\n",
- " 6dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 6de: 23da movs r3, #218 ; 0xda\n",
- " 6e0: 23dc movs r3, #220 ; 0xdc\n",
- " 6e2: 23de movs r3, #222 ; 0xde\n",
- " 6e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 6e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 6e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 6ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 6ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 6ee: 23ea movs r3, #234 ; 0xea\n",
- " 6f0: 23ec movs r3, #236 ; 0xec\n",
- " 6f2: 23ee movs r3, #238 ; 0xee\n",
- " 6f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 6f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 6f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 6fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 6fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 6fe: 23fa movs r3, #250 ; 0xfa\n",
- " 700: 23fc movs r3, #252 ; 0xfc\n",
- " 702: 23fe movs r3, #254 ; 0xfe\n",
- " 704: 2300 movs r3, #0\n",
- " 706: 2302 movs r3, #2\n",
- " 708: 2304 movs r3, #4\n",
- " 70a: 2306 movs r3, #6\n",
- " 70c: 2308 movs r3, #8\n",
- " 70e: 230a movs r3, #10\n",
- " 710: 230c movs r3, #12\n",
- " 712: 230e movs r3, #14\n",
- " 714: 2310 movs r3, #16\n",
- " 716: 2312 movs r3, #18\n",
- " 718: 2314 movs r3, #20\n",
- " 71a: 2316 movs r3, #22\n",
- " 71c: 2318 movs r3, #24\n",
- " 71e: 231a movs r3, #26\n",
- " 720: 231c movs r3, #28\n",
- " 722: 231e movs r3, #30\n",
- " 724: 2320 movs r3, #32\n",
- " 726: 2322 movs r3, #34 ; 0x22\n",
- " 728: 2324 movs r3, #36 ; 0x24\n",
- " 72a: 2326 movs r3, #38 ; 0x26\n",
- " 72c: 2328 movs r3, #40 ; 0x28\n",
- " 72e: 232a movs r3, #42 ; 0x2a\n",
- " 730: 232c movs r3, #44 ; 0x2c\n",
- " 732: 232e movs r3, #46 ; 0x2e\n",
- " 734: 2330 movs r3, #48 ; 0x30\n",
- " 736: 2332 movs r3, #50 ; 0x32\n",
- " 738: 2334 movs r3, #52 ; 0x34\n",
- " 73a: 2336 movs r3, #54 ; 0x36\n",
- " 73c: 2338 movs r3, #56 ; 0x38\n",
- " 73e: 233a movs r3, #58 ; 0x3a\n",
- " 740: 233c movs r3, #60 ; 0x3c\n",
- " 742: 233e movs r3, #62 ; 0x3e\n",
- " 744: 2340 movs r3, #64 ; 0x40\n",
- " 746: 2342 movs r3, #66 ; 0x42\n",
- " 748: 2344 movs r3, #68 ; 0x44\n",
- " 74a: 2346 movs r3, #70 ; 0x46\n",
- " 74c: 2348 movs r3, #72 ; 0x48\n",
- " 74e: 234a movs r3, #74 ; 0x4a\n",
- " 750: 234c movs r3, #76 ; 0x4c\n",
- " 752: 234e movs r3, #78 ; 0x4e\n",
- " 754: 2350 movs r3, #80 ; 0x50\n",
- " 756: 2352 movs r3, #82 ; 0x52\n",
- " 758: 2354 movs r3, #84 ; 0x54\n",
- " 75a: 2356 movs r3, #86 ; 0x56\n",
- " 75c: 2358 movs r3, #88 ; 0x58\n",
- " 75e: 235a movs r3, #90 ; 0x5a\n",
- " 760: 235c movs r3, #92 ; 0x5c\n",
- " 762: 235e movs r3, #94 ; 0x5e\n",
- " 764: 2360 movs r3, #96 ; 0x60\n",
- " 766: 2362 movs r3, #98 ; 0x62\n",
- " 768: 2364 movs r3, #100 ; 0x64\n",
- " 76a: 2366 movs r3, #102 ; 0x66\n",
- " 76c: 2368 movs r3, #104 ; 0x68\n",
- " 76e: 236a movs r3, #106 ; 0x6a\n",
- " 770: 236c movs r3, #108 ; 0x6c\n",
- " 772: 236e movs r3, #110 ; 0x6e\n",
- " 774: 2370 movs r3, #112 ; 0x70\n",
- " 776: 2372 movs r3, #114 ; 0x72\n",
- " 778: 2374 movs r3, #116 ; 0x74\n",
- " 77a: 2376 movs r3, #118 ; 0x76\n",
- " 77c: 2378 movs r3, #120 ; 0x78\n",
- " 77e: 237a movs r3, #122 ; 0x7a\n",
- " 780: 237c movs r3, #124 ; 0x7c\n",
- " 782: 237e movs r3, #126 ; 0x7e\n",
- " 784: 2380 movs r3, #128 ; 0x80\n",
- " 786: 2382 movs r3, #130 ; 0x82\n",
- " 788: 2384 movs r3, #132 ; 0x84\n",
- " 78a: 2386 movs r3, #134 ; 0x86\n",
- " 78c: 2388 movs r3, #136 ; 0x88\n",
- " 78e: 238a movs r3, #138 ; 0x8a\n",
- " 790: 238c movs r3, #140 ; 0x8c\n",
- " 792: 238e movs r3, #142 ; 0x8e\n",
- " 794: 2390 movs r3, #144 ; 0x90\n",
- " 796: 2392 movs r3, #146 ; 0x92\n",
- " 798: 2394 movs r3, #148 ; 0x94\n",
- " 79a: 2396 movs r3, #150 ; 0x96\n",
- " 79c: 2398 movs r3, #152 ; 0x98\n",
- " 79e: 239a movs r3, #154 ; 0x9a\n",
- " 7a0: 239c movs r3, #156 ; 0x9c\n",
- " 7a2: 239e movs r3, #158 ; 0x9e\n",
- " 7a4: 23a0 movs r3, #160 ; 0xa0\n",
- " 7a6: 23a2 movs r3, #162 ; 0xa2\n",
- " 7a8: 23a4 movs r3, #164 ; 0xa4\n",
- " 7aa: 23a6 movs r3, #166 ; 0xa6\n",
- " 7ac: 23a8 movs r3, #168 ; 0xa8\n",
- " 7ae: 23aa movs r3, #170 ; 0xaa\n",
- " 7b0: 23ac movs r3, #172 ; 0xac\n",
- " 7b2: 23ae movs r3, #174 ; 0xae\n",
- " 7b4: 23b0 movs r3, #176 ; 0xb0\n",
- " 7b6: 23b2 movs r3, #178 ; 0xb2\n",
- " 7b8: 23b4 movs r3, #180 ; 0xb4\n",
- " 7ba: 23b6 movs r3, #182 ; 0xb6\n",
- " 7bc: 23b8 movs r3, #184 ; 0xb8\n",
- " 7be: 23ba movs r3, #186 ; 0xba\n",
- " 7c0: 23bc movs r3, #188 ; 0xbc\n",
- " 7c2: 23be movs r3, #190 ; 0xbe\n",
- " 7c4: 23c0 movs r3, #192 ; 0xc0\n",
- " 7c6: 23c2 movs r3, #194 ; 0xc2\n",
- " 7c8: 23c4 movs r3, #196 ; 0xc4\n",
- " 7ca: 23c6 movs r3, #198 ; 0xc6\n",
- " 7cc: 23c8 movs r3, #200 ; 0xc8\n",
- " 7ce: 23ca movs r3, #202 ; 0xca\n",
- " 7d0: 23cc movs r3, #204 ; 0xcc\n",
- " 7d2: 23ce movs r3, #206 ; 0xce\n",
- " 7d4: 23d0 movs r3, #208 ; 0xd0\n",
- " 7d6: 23d2 movs r3, #210 ; 0xd2\n",
- " 7d8: 23d4 movs r3, #212 ; 0xd4\n",
- " 7da: 23d6 movs r3, #214 ; 0xd6\n",
- " 7dc: 23d8 movs r3, #216 ; 0xd8\n",
- " 7de: 23da movs r3, #218 ; 0xda\n",
- " 7e0: 23dc movs r3, #220 ; 0xdc\n",
- " 7e2: 23de movs r3, #222 ; 0xde\n",
- " 7e4: 23e0 movs r3, #224 ; 0xe0\n",
- " 7e6: 23e2 movs r3, #226 ; 0xe2\n",
- " 7e8: 23e4 movs r3, #228 ; 0xe4\n",
- " 7ea: 23e6 movs r3, #230 ; 0xe6\n",
- " 7ec: 23e8 movs r3, #232 ; 0xe8\n",
- " 7ee: 23ea movs r3, #234 ; 0xea\n",
- " 7f0: 23ec movs r3, #236 ; 0xec\n",
- " 7f2: 23ee movs r3, #238 ; 0xee\n",
- " 7f4: 23f0 movs r3, #240 ; 0xf0\n",
- " 7f6: 23f2 movs r3, #242 ; 0xf2\n",
- " 7f8: 23f4 movs r3, #244 ; 0xf4\n",
- " 7fa: 23f6 movs r3, #246 ; 0xf6\n",
- " 7fc: 23f8 movs r3, #248 ; 0xf8\n",
- " 7fe: 23fa movs r3, #250 ; 0xfa\n",
- " 800: 23fc movs r3, #252 ; 0xfc\n",
- " 802: 23fe movs r3, #254 ; 0xfe\n",
- " 804: 2300 movs r3, #0\n",
- " 806: f7ff bbfd b.w 4 <MixedBranch32+0x4>\n",
- " 80a: 4611 mov r1, r2\n",
- nullptr
-};
-const char* const ShiftsResults[] = {
- " 0: 0148 lsls r0, r1, #5\n",
- " 2: 0948 lsrs r0, r1, #5\n",
- " 4: 1148 asrs r0, r1, #5\n",
- " 6: 4088 lsls r0, r1\n",
- " 8: 40c8 lsrs r0, r1\n",
- " a: 4108 asrs r0, r1\n",
- " c: 41c8 rors r0, r1\n",
- " e: 0148 lsls r0, r1, #5\n",
- " 10: 0948 lsrs r0, r1, #5\n",
- " 12: 1148 asrs r0, r1, #5\n",
- " 14: 4088 lsls r0, r1\n",
- " 16: 40c8 lsrs r0, r1\n",
- " 18: 4108 asrs r0, r1\n",
- " 1a: 41c8 rors r0, r1\n",
- " 1c: ea4f 1041 mov.w r0, r1, lsl #5\n",
- " 20: ea4f 1051 mov.w r0, r1, lsr #5\n",
- " 24: ea4f 1061 mov.w r0, r1, asr #5\n",
- " 28: fa00 f001 lsl.w r0, r0, r1\n",
- " 2c: fa20 f001 lsr.w r0, r0, r1\n",
- " 30: fa40 f001 asr.w r0, r0, r1\n",
- " 34: fa60 f001 ror.w r0, r0, r1\n",
- " 38: ea4f 1071 mov.w r0, r1, ror #5\n",
- " 3c: ea5f 1071 movs.w r0, r1, ror #5\n",
- " 40: ea4f 1071 mov.w r0, r1, ror #5\n",
- " 44: ea4f 1841 mov.w r8, r1, lsl #5\n",
- " 48: ea4f 1058 mov.w r0, r8, lsr #5\n",
- " 4c: ea4f 1861 mov.w r8, r1, asr #5\n",
- " 50: ea4f 1078 mov.w r0, r8, ror #5\n",
- " 54: fa01 f002 lsl.w r0, r1, r2\n",
- " 58: fa21 f002 lsr.w r0, r1, r2\n",
- " 5c: fa41 f002 asr.w r0, r1, r2\n",
- " 60: fa61 f002 ror.w r0, r1, r2\n",
- " 64: fa01 f802 lsl.w r8, r1, r2\n",
- " 68: fa28 f002 lsr.w r0, r8, r2\n",
- " 6c: fa41 f008 asr.w r0, r1, r8\n",
- " 70: ea5f 1841 movs.w r8, r1, lsl #5\n",
- " 74: ea5f 1058 movs.w r0, r8, lsr #5\n",
- " 78: ea5f 1861 movs.w r8, r1, asr #5\n",
- " 7c: ea5f 1078 movs.w r0, r8, ror #5\n",
- " 80: fa11 f002 lsls.w r0, r1, r2\n",
- " 84: fa31 f002 lsrs.w r0, r1, r2\n",
- " 88: fa51 f002 asrs.w r0, r1, r2\n",
- " 8c: fa71 f002 rors.w r0, r1, r2\n",
- " 90: fa11 f802 lsls.w r8, r1, r2\n",
- " 94: fa38 f002 lsrs.w r0, r8, r2\n",
- " 98: fa51 f008 asrs.w r0, r1, r8\n",
- nullptr
-};
-const char* const LoadStoreRegOffsetResults[] = {
- " 0: 5888 ldr r0, [r1, r2]\n",
- " 2: 5088 str r0, [r1, r2]\n",
- " 4: f851 0012 ldr.w r0, [r1, r2, lsl #1]\n",
- " 8: f841 0012 str.w r0, [r1, r2, lsl #1]\n",
- " c: f851 0032 ldr.w r0, [r1, r2, lsl #3]\n",
- " 10: f841 0032 str.w r0, [r1, r2, lsl #3]\n",
- " 14: f851 8002 ldr.w r8, [r1, r2]\n",
- " 18: f841 8002 str.w r8, [r1, r2]\n",
- " 1c: f858 1002 ldr.w r1, [r8, r2]\n",
- " 20: f848 2002 str.w r2, [r8, r2]\n",
- " 24: f851 0008 ldr.w r0, [r1, r8]\n",
- " 28: f841 0008 str.w r0, [r1, r8]\n",
- nullptr
-};
-const char* const LoadStoreLimitsResults[] = {
- " 0: 6fe0 ldr r0, [r4, #124] ; 0x7c\n",
- " 2: f8d4 0080 ldr.w r0, [r4, #128] ; 0x80\n",
- " 6: 7fe0 ldrb r0, [r4, #31]\n",
- " 8: f894 0020 ldrb.w r0, [r4, #32]\n",
- " c: 8fe0 ldrh r0, [r4, #62] ; 0x3e\n",
- " e: f8b4 0040 ldrh.w r0, [r4, #64] ; 0x40\n",
- " 12: f994 001f ldrsb.w r0, [r4, #31]\n",
- " 16: f994 0020 ldrsb.w r0, [r4, #32]\n",
- " 1a: f9b4 003e ldrsh.w r0, [r4, #62] ; 0x3e\n",
- " 1e: f9b4 0040 ldrsh.w r0, [r4, #64] ; 0x40\n",
- " 22: 67e0 str r0, [r4, #124] ; 0x7c\n",
- " 24: f8c4 0080 str.w r0, [r4, #128] ; 0x80\n",
- " 28: 77e0 strb r0, [r4, #31]\n",
- " 2a: f884 0020 strb.w r0, [r4, #32]\n",
- " 2e: 87e0 strh r0, [r4, #62] ; 0x3e\n",
- " 30: f8a4 0040 strh.w r0, [r4, #64] ; 0x40\n",
- nullptr
-};
-const char* const CompareAndBranchResults[] = {
- " 0: b130 cbz r0, 10 <CompareAndBranch+0x10>\n",
- " 2: f1bb 0f00 cmp.w fp, #0\n",
- " 6: d003 beq.n 10 <CompareAndBranch+0x10>\n",
- " 8: b910 cbnz r0, 10 <CompareAndBranch+0x10>\n",
- " a: f1bb 0f00 cmp.w fp, #0\n",
- " e: d1ff bne.n 10 <CompareAndBranch+0x10>\n",
- nullptr
-};
-
-const char* const AddConstantResults[] = {
- " 0: 4608 mov r0, r1\n",
- " 2: 1c48 adds r0, r1, #1\n",
- " 4: 1dc8 adds r0, r1, #7\n",
- " 6: f101 0008 add.w r0, r1, #8\n",
- " a: f101 00ff add.w r0, r1, #255 ; 0xff\n",
- " e: f501 7080 add.w r0, r1, #256 ; 0x100\n",
- " 12: f201 1001 addw r0, r1, #257 ; 0x101\n",
- " 16: f601 70ff addw r0, r1, #4095 ; 0xfff\n",
- " 1a: f501 5080 add.w r0, r1, #4096 ; 0x1000\n",
- " 1e: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
- " 22: 1a08 subs r0, r1, r0\n",
- " 24: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 28: 1808 adds r0, r1, r0\n",
- " 2a: f64f 70ff movw r0, #65535 ; 0xffff\n",
- " 2e: 1808 adds r0, r1, r0\n",
- " 30: f501 3080 add.w r0, r1, #65536 ; 0x10000\n",
- " 34: f101 1001 add.w r0, r1, #65537 ; 0x10001\n",
- " 38: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
- " 3c: 1a08 subs r0, r1, r0\n",
- " 3e: f240 0003 movw r0, #3\n",
- " 42: f2c0 0001 movt r0, #1\n",
- " 46: 1808 adds r0, r1, r0\n",
- " 48: 1e48 subs r0, r1, #1\n",
- " 4a: 1fc8 subs r0, r1, #7\n",
- " 4c: f1a1 0008 sub.w r0, r1, #8\n",
- " 50: f1a1 00ff sub.w r0, r1, #255 ; 0xff\n",
- " 54: f5a1 7080 sub.w r0, r1, #256 ; 0x100\n",
- " 58: f2a1 1001 subw r0, r1, #257 ; 0x101\n",
- " 5c: f6a1 70ff subw r0, r1, #4095 ; 0xfff\n",
- " 60: f5a1 5080 sub.w r0, r1, #4096 ; 0x1000\n",
- " 64: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
- " 68: 1808 adds r0, r1, r0\n",
- " 6a: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 6e: 1a08 subs r0, r1, r0\n",
- " 70: f64f 70ff movw r0, #65535 ; 0xffff\n",
- " 74: 1a08 subs r0, r1, r0\n",
- " 76: f5a1 3080 sub.w r0, r1, #65536 ; 0x10000\n",
- " 7a: f1a1 1001 sub.w r0, r1, #65537 ; 0x10001\n",
- " 7e: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
- " 82: 1808 adds r0, r1, r0\n",
- " 84: f64f 70fd movw r0, #65533 ; 0xfffd\n",
- " 88: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
- " 8c: 1808 adds r0, r1, r0\n",
- " 8e: 3101 adds r1, #1\n",
- " 90: 3007 adds r0, #7\n",
- " 92: 3108 adds r1, #8\n",
- " 94: 30ff adds r0, #255 ; 0xff\n",
- " 96: f501 7180 add.w r1, r1, #256 ; 0x100\n",
- " 9a: f200 1001 addw r0, r0, #257 ; 0x101\n",
- " 9e: f601 71ff addw r1, r1, #4095 ; 0xfff\n",
- " a2: f500 5080 add.w r0, r0, #4096 ; 0x1000\n",
- " a6: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " aa: eba1 010c sub.w r1, r1, ip\n",
- " ae: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " b2: 4460 add r0, ip\n",
- " b4: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " b8: 4461 add r1, ip\n",
- " ba: f500 3080 add.w r0, r0, #65536 ; 0x10000\n",
- " be: f101 1101 add.w r1, r1, #65537 ; 0x10001\n",
- " c2: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " c6: eba0 000c sub.w r0, r0, ip\n",
- " ca: f240 0c03 movw ip, #3\n",
- " ce: f2c0 0c01 movt ip, #1\n",
- " d2: 4461 add r1, ip\n",
- " d4: 3801 subs r0, #1\n",
- " d6: 3907 subs r1, #7\n",
- " d8: 3808 subs r0, #8\n",
- " da: 39ff subs r1, #255 ; 0xff\n",
- " dc: f5a0 7080 sub.w r0, r0, #256 ; 0x100\n",
- " e0: f2a1 1101 subw r1, r1, #257 ; 0x101\n",
- " e4: f6a0 70ff subw r0, r0, #4095 ; 0xfff\n",
- " e8: f5a1 5180 sub.w r1, r1, #4096 ; 0x1000\n",
- " ec: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " f0: 4460 add r0, ip\n",
- " f2: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " f6: eba1 010c sub.w r1, r1, ip\n",
- " fa: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " fe: eba0 000c sub.w r0, r0, ip\n",
- " 102: f5a1 3180 sub.w r1, r1, #65536 ; 0x10000\n",
- " 106: f1a0 1001 sub.w r0, r0, #65537 ; 0x10001\n",
- " 10a: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 10e: 4461 add r1, ip\n",
- " 110: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
- " 114: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
- " 118: 4460 add r0, ip\n",
- " 11a: f101 0801 add.w r8, r1, #1\n",
- " 11e: f108 0007 add.w r0, r8, #7\n",
- " 122: f108 0808 add.w r8, r8, #8\n",
- " 126: f101 08ff add.w r8, r1, #255 ; 0xff\n",
- " 12a: f508 7080 add.w r0, r8, #256 ; 0x100\n",
- " 12e: f208 1801 addw r8, r8, #257 ; 0x101\n",
- " 132: f601 78ff addw r8, r1, #4095 ; 0xfff\n",
- " 136: f508 5080 add.w r0, r8, #4096 ; 0x1000\n",
- " 13a: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 13e: eba8 080c sub.w r8, r8, ip\n",
- " 142: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 146: 1808 adds r0, r1, r0\n",
- " 148: f64f 70ff movw r0, #65535 ; 0xffff\n",
- " 14c: eb08 0000 add.w r0, r8, r0\n",
- " 150: f508 3880 add.w r8, r8, #65536 ; 0x10000\n",
- " 154: f101 1801 add.w r8, r1, #65537 ; 0x10001\n",
- " 158: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
- " 15c: eba8 0000 sub.w r0, r8, r0\n",
- " 160: f240 0003 movw r0, #3\n",
- " 164: f2c0 0001 movt r0, #1\n",
- " 168: eb08 0000 add.w r0, r8, r0\n",
- " 16c: f108 38ff add.w r8, r8, #4294967295 ; 0xffffffff\n",
- " 170: f1a1 0807 sub.w r8, r1, #7\n",
- " 174: f1a8 0008 sub.w r0, r8, #8\n",
- " 178: f1a8 08ff sub.w r8, r8, #255 ; 0xff\n",
- " 17c: f5a1 7880 sub.w r8, r1, #256 ; 0x100\n",
- " 180: f2a8 1001 subw r0, r8, #257 ; 0x101\n",
- " 184: f6a8 78ff subw r8, r8, #4095 ; 0xfff\n",
- " 188: f5a1 5880 sub.w r8, r1, #4096 ; 0x1000\n",
- " 18c: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
- " 190: eb08 0000 add.w r0, r8, r0\n",
- " 194: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 198: 1a08 subs r0, r1, r0\n",
- " 19a: f64f 78ff movw r8, #65535 ; 0xffff\n",
- " 19e: eba1 0808 sub.w r8, r1, r8\n",
- " 1a2: f5a8 3080 sub.w r0, r8, #65536 ; 0x10000\n",
- " 1a6: f1a8 1801 sub.w r8, r8, #65537 ; 0x10001\n",
- " 1aa: f06f 1801 mvn.w r8, #65537 ; 0x10001\n",
- " 1ae: eb01 0808 add.w r8, r1, r8\n",
- " 1b2: f64f 70fd movw r0, #65533 ; 0xfffd\n",
- " 1b6: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
- " 1ba: eb08 0000 add.w r0, r8, r0\n",
- " 1be: 4608 mov r0, r1\n",
- " 1c0: f101 0001 add.w r0, r1, #1\n",
- " 1c4: f101 0007 add.w r0, r1, #7\n",
- " 1c8: f101 0008 add.w r0, r1, #8\n",
- " 1cc: f101 00ff add.w r0, r1, #255 ; 0xff\n",
- " 1d0: f501 7080 add.w r0, r1, #256 ; 0x100\n",
- " 1d4: f201 1001 addw r0, r1, #257 ; 0x101\n",
- " 1d8: f601 70ff addw r0, r1, #4095 ; 0xfff\n",
- " 1dc: f501 5080 add.w r0, r1, #4096 ; 0x1000\n",
- " 1e0: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
- " 1e4: eba1 0000 sub.w r0, r1, r0\n",
- " 1e8: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 1ec: eb01 0000 add.w r0, r1, r0\n",
- " 1f0: f64f 70ff movw r0, #65535 ; 0xffff\n",
- " 1f4: eb01 0000 add.w r0, r1, r0\n",
- " 1f8: f501 3080 add.w r0, r1, #65536 ; 0x10000\n",
- " 1fc: f101 1001 add.w r0, r1, #65537 ; 0x10001\n",
- " 200: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
- " 204: eba1 0000 sub.w r0, r1, r0\n",
- " 208: f240 0003 movw r0, #3\n",
- " 20c: f2c0 0001 movt r0, #1\n",
- " 210: eb01 0000 add.w r0, r1, r0\n",
- " 214: f101 30ff add.w r0, r1, #4294967295 ; 0xffffffff\n",
- " 218: f1a1 0007 sub.w r0, r1, #7\n",
- " 21c: f1a1 0008 sub.w r0, r1, #8\n",
- " 220: f1a1 00ff sub.w r0, r1, #255 ; 0xff\n",
- " 224: f5a1 7080 sub.w r0, r1, #256 ; 0x100\n",
- " 228: f2a1 1001 subw r0, r1, #257 ; 0x101\n",
- " 22c: f6a1 70ff subw r0, r1, #4095 ; 0xfff\n",
- " 230: f5a1 5080 sub.w r0, r1, #4096 ; 0x1000\n",
- " 234: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
- " 238: eb01 0000 add.w r0, r1, r0\n",
- " 23c: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 240: eba1 0000 sub.w r0, r1, r0\n",
- " 244: f64f 70ff movw r0, #65535 ; 0xffff\n",
- " 248: eba1 0000 sub.w r0, r1, r0\n",
- " 24c: f5a1 3080 sub.w r0, r1, #65536 ; 0x10000\n",
- " 250: f1a1 1001 sub.w r0, r1, #65537 ; 0x10001\n",
- " 254: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
- " 258: eb01 0000 add.w r0, r1, r0\n",
- " 25c: f64f 70fd movw r0, #65533 ; 0xfffd\n",
- " 260: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
- " 264: eb01 0000 add.w r0, r1, r0\n",
- " 268: f101 0101 add.w r1, r1, #1\n",
- " 26c: f100 0007 add.w r0, r0, #7\n",
- " 270: f101 0108 add.w r1, r1, #8\n",
- " 274: f100 00ff add.w r0, r0, #255 ; 0xff\n",
- " 278: f501 7180 add.w r1, r1, #256 ; 0x100\n",
- " 27c: f200 1001 addw r0, r0, #257 ; 0x101\n",
- " 280: f601 71ff addw r1, r1, #4095 ; 0xfff\n",
- " 284: f500 5080 add.w r0, r0, #4096 ; 0x1000\n",
- " 288: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 28c: eba1 010c sub.w r1, r1, ip\n",
- " 290: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " 294: 4460 add r0, ip\n",
- " 296: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " 29a: 4461 add r1, ip\n",
- " 29c: f500 3080 add.w r0, r0, #65536 ; 0x10000\n",
- " 2a0: f101 1101 add.w r1, r1, #65537 ; 0x10001\n",
- " 2a4: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 2a8: eba0 000c sub.w r0, r0, ip\n",
- " 2ac: f240 0c03 movw ip, #3\n",
- " 2b0: f2c0 0c01 movt ip, #1\n",
- " 2b4: 4461 add r1, ip\n",
- " 2b6: f100 30ff add.w r0, r0, #4294967295 ; 0xffffffff\n",
- " 2ba: f1a1 0107 sub.w r1, r1, #7\n",
- " 2be: f1a0 0008 sub.w r0, r0, #8\n",
- " 2c2: f1a1 01ff sub.w r1, r1, #255 ; 0xff\n",
- " 2c6: f5a0 7080 sub.w r0, r0, #256 ; 0x100\n",
- " 2ca: f2a1 1101 subw r1, r1, #257 ; 0x101\n",
- " 2ce: f6a0 70ff subw r0, r0, #4095 ; 0xfff\n",
- " 2d2: f5a1 5180 sub.w r1, r1, #4096 ; 0x1000\n",
- " 2d6: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 2da: 4460 add r0, ip\n",
- " 2dc: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " 2e0: eba1 010c sub.w r1, r1, ip\n",
- " 2e4: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " 2e8: eba0 000c sub.w r0, r0, ip\n",
- " 2ec: f5a1 3180 sub.w r1, r1, #65536 ; 0x10000\n",
- " 2f0: f1a0 1001 sub.w r0, r0, #65537 ; 0x10001\n",
- " 2f4: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 2f8: 4461 add r1, ip\n",
- " 2fa: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
- " 2fe: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
- " 302: 4460 add r0, ip\n",
- " 304: 1c08 adds r0, r1, #0\n",
- " 306: 1c48 adds r0, r1, #1\n",
- " 308: 1dc8 adds r0, r1, #7\n",
- " 30a: f111 0008 adds.w r0, r1, #8\n",
- " 30e: f111 00ff adds.w r0, r1, #255 ; 0xff\n",
- " 312: f511 7080 adds.w r0, r1, #256 ; 0x100\n",
- " 316: f46f 7080 mvn.w r0, #256 ; 0x100\n",
- " 31a: 1a08 subs r0, r1, r0\n",
- " 31c: f640 70ff movw r0, #4095 ; 0xfff\n",
- " 320: 1808 adds r0, r1, r0\n",
- " 322: f511 5080 adds.w r0, r1, #4096 ; 0x1000\n",
- " 326: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
- " 32a: 1a08 subs r0, r1, r0\n",
- " 32c: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 330: 1808 adds r0, r1, r0\n",
- " 332: f64f 70ff movw r0, #65535 ; 0xffff\n",
- " 336: 1808 adds r0, r1, r0\n",
- " 338: f511 3080 adds.w r0, r1, #65536 ; 0x10000\n",
- " 33c: f111 1001 adds.w r0, r1, #65537 ; 0x10001\n",
- " 340: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
- " 344: 1a08 subs r0, r1, r0\n",
- " 346: f240 0003 movw r0, #3\n",
- " 34a: f2c0 0001 movt r0, #1\n",
- " 34e: 1808 adds r0, r1, r0\n",
- " 350: 1e48 subs r0, r1, #1\n",
- " 352: 1fc8 subs r0, r1, #7\n",
- " 354: f1b1 0008 subs.w r0, r1, #8\n",
- " 358: f1b1 00ff subs.w r0, r1, #255 ; 0xff\n",
- " 35c: f5b1 7080 subs.w r0, r1, #256 ; 0x100\n",
- " 360: f46f 7080 mvn.w r0, #256 ; 0x100\n",
- " 364: 1808 adds r0, r1, r0\n",
- " 366: f640 70ff movw r0, #4095 ; 0xfff\n",
- " 36a: 1a08 subs r0, r1, r0\n",
- " 36c: f5b1 5080 subs.w r0, r1, #4096 ; 0x1000\n",
- " 370: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
- " 374: 1808 adds r0, r1, r0\n",
- " 376: f241 0002 movw r0, #4098 ; 0x1002\n",
- " 37a: 1a08 subs r0, r1, r0\n",
- " 37c: f64f 70ff movw r0, #65535 ; 0xffff\n",
- " 380: 1a08 subs r0, r1, r0\n",
- " 382: f5b1 3080 subs.w r0, r1, #65536 ; 0x10000\n",
- " 386: f1b1 1001 subs.w r0, r1, #65537 ; 0x10001\n",
- " 38a: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
- " 38e: 1808 adds r0, r1, r0\n",
- " 390: f64f 70fd movw r0, #65533 ; 0xfffd\n",
- " 394: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
- " 398: 1808 adds r0, r1, r0\n",
- " 39a: 3000 adds r0, #0\n",
- " 39c: 3101 adds r1, #1\n",
- " 39e: 3007 adds r0, #7\n",
- " 3a0: 3108 adds r1, #8\n",
- " 3a2: 30ff adds r0, #255 ; 0xff\n",
- " 3a4: f511 7180 adds.w r1, r1, #256 ; 0x100\n",
- " 3a8: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
- " 3ac: ebb0 000c subs.w r0, r0, ip\n",
- " 3b0: f640 7cff movw ip, #4095 ; 0xfff\n",
- " 3b4: eb11 010c adds.w r1, r1, ip\n",
- " 3b8: f510 5080 adds.w r0, r0, #4096 ; 0x1000\n",
- " 3bc: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 3c0: ebb1 010c subs.w r1, r1, ip\n",
- " 3c4: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " 3c8: eb10 000c adds.w r0, r0, ip\n",
- " 3cc: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " 3d0: eb11 010c adds.w r1, r1, ip\n",
- " 3d4: f510 3080 adds.w r0, r0, #65536 ; 0x10000\n",
- " 3d8: f111 1101 adds.w r1, r1, #65537 ; 0x10001\n",
- " 3dc: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 3e0: ebb0 000c subs.w r0, r0, ip\n",
- " 3e4: f240 0c03 movw ip, #3\n",
- " 3e8: f2c0 0c01 movt ip, #1\n",
- " 3ec: eb11 010c adds.w r1, r1, ip\n",
- " 3f0: 3801 subs r0, #1\n",
- " 3f2: 3907 subs r1, #7\n",
- " 3f4: 3808 subs r0, #8\n",
- " 3f6: 39ff subs r1, #255 ; 0xff\n",
- " 3f8: f5b0 7080 subs.w r0, r0, #256 ; 0x100\n",
- " 3fc: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
- " 400: eb11 010c adds.w r1, r1, ip\n",
- " 404: f640 7cff movw ip, #4095 ; 0xfff\n",
- " 408: ebb0 000c subs.w r0, r0, ip\n",
- " 40c: f5b1 5180 subs.w r1, r1, #4096 ; 0x1000\n",
- " 410: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 414: eb10 000c adds.w r0, r0, ip\n",
- " 418: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " 41c: ebb1 010c subs.w r1, r1, ip\n",
- " 420: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " 424: ebb0 000c subs.w r0, r0, ip\n",
- " 428: f5b1 3180 subs.w r1, r1, #65536 ; 0x10000\n",
- " 42c: f1b0 1001 subs.w r0, r0, #65537 ; 0x10001\n",
- " 430: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 434: eb11 010c adds.w r1, r1, ip\n",
- " 438: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
- " 43c: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
- " 440: eb10 000c adds.w r0, r0, ip\n",
- " 444: bf08 it eq\n",
- " 446: f111 0001 addseq.w r0, r1, #1\n",
- " 44a: bf18 it ne\n",
- " 44c: 1c48 addne r0, r1, #1\n",
- " 44e: bfa8 it ge\n",
- " 450: f110 0001 addsge.w r0, r0, #1\n",
- " 454: bfd8 it le\n",
- " 456: 3001 addle r0, #1\n",
- nullptr
-};
-
-const char* const CmpConstantResults[] = {
- " 0: 2800 cmp r0, #0\n",
- " 2: 2901 cmp r1, #1\n",
- " 4: 2807 cmp r0, #7\n",
- " 6: 2908 cmp r1, #8\n",
- " 8: 28ff cmp r0, #255 ; 0xff\n",
- " a: f5b1 7f80 cmp.w r1, #256 ; 0x100\n",
- " e: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
- " 12: eb10 0f0c cmn.w r0, ip\n",
- " 16: f640 7cff movw ip, #4095 ; 0xfff\n",
- " 1a: 4561 cmp r1, ip\n",
- " 1c: f5b0 5f80 cmp.w r0, #4096 ; 0x1000\n",
- " 20: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 24: eb11 0f0c cmn.w r1, ip\n",
- " 28: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " 2c: 4560 cmp r0, ip\n",
- " 2e: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " 32: 4561 cmp r1, ip\n",
- " 34: f5b0 3f80 cmp.w r0, #65536 ; 0x10000\n",
- " 38: f1b1 1f01 cmp.w r1, #65537 ; 0x10001\n",
- " 3c: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 40: eb10 0f0c cmn.w r0, ip\n",
- " 44: f240 0c03 movw ip, #3\n",
- " 48: f2c0 0c01 movt ip, #1\n",
- " 4c: 4561 cmp r1, ip\n",
- " 4e: f1b0 3fff cmp.w r0, #4294967295 ; 0xffffffff\n",
- " 52: f111 0f07 cmn.w r1, #7\n",
- " 56: f110 0f08 cmn.w r0, #8\n",
- " 5a: f111 0fff cmn.w r1, #255 ; 0xff\n",
- " 5e: f510 7f80 cmn.w r0, #256 ; 0x100\n",
- " 62: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
- " 66: 4561 cmp r1, ip\n",
- " 68: f640 7cff movw ip, #4095 ; 0xfff\n",
- " 6c: eb10 0f0c cmn.w r0, ip\n",
- " 70: f511 5f80 cmn.w r1, #4096 ; 0x1000\n",
- " 74: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 78: 4560 cmp r0, ip\n",
- " 7a: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " 7e: eb11 0f0c cmn.w r1, ip\n",
- " 82: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " 86: eb10 0f0c cmn.w r0, ip\n",
- " 8a: f511 3f80 cmn.w r1, #65536 ; 0x10000\n",
- " 8e: f110 1f01 cmn.w r0, #65537 ; 0x10001\n",
- " 92: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 96: 4561 cmp r1, ip\n",
- " 98: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
- " 9c: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
- " a0: 4560 cmp r0, ip\n",
- " a2: f1b8 0f00 cmp.w r8, #0\n",
- " a6: f1b9 0f01 cmp.w r9, #1\n",
- " aa: f1b8 0f07 cmp.w r8, #7\n",
- " ae: f1b9 0f08 cmp.w r9, #8\n",
- " b2: f1b8 0fff cmp.w r8, #255 ; 0xff\n",
- " b6: f5b9 7f80 cmp.w r9, #256 ; 0x100\n",
- " ba: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
- " be: eb18 0f0c cmn.w r8, ip\n",
- " c2: f640 7cff movw ip, #4095 ; 0xfff\n",
- " c6: 45e1 cmp r9, ip\n",
- " c8: f5b8 5f80 cmp.w r8, #4096 ; 0x1000\n",
- " cc: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " d0: eb19 0f0c cmn.w r9, ip\n",
- " d4: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " d8: 45e0 cmp r8, ip\n",
- " da: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " de: 45e1 cmp r9, ip\n",
- " e0: f5b8 3f80 cmp.w r8, #65536 ; 0x10000\n",
- " e4: f1b9 1f01 cmp.w r9, #65537 ; 0x10001\n",
- " e8: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " ec: eb18 0f0c cmn.w r8, ip\n",
- " f0: f240 0c03 movw ip, #3\n",
- " f4: f2c0 0c01 movt ip, #1\n",
- " f8: 45e1 cmp r9, ip\n",
- " fa: f1b8 3fff cmp.w r8, #4294967295 ; 0xffffffff\n",
- " fe: f119 0f07 cmn.w r9, #7\n",
- " 102: f118 0f08 cmn.w r8, #8\n",
- " 106: f119 0fff cmn.w r9, #255 ; 0xff\n",
- " 10a: f518 7f80 cmn.w r8, #256 ; 0x100\n",
- " 10e: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
- " 112: 45e1 cmp r9, ip\n",
- " 114: f640 7cff movw ip, #4095 ; 0xfff\n",
- " 118: eb18 0f0c cmn.w r8, ip\n",
- " 11c: f519 5f80 cmn.w r9, #4096 ; 0x1000\n",
- " 120: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
- " 124: 45e0 cmp r8, ip\n",
- " 126: f241 0c02 movw ip, #4098 ; 0x1002\n",
- " 12a: eb19 0f0c cmn.w r9, ip\n",
- " 12e: f64f 7cff movw ip, #65535 ; 0xffff\n",
- " 132: eb18 0f0c cmn.w r8, ip\n",
- " 136: f519 3f80 cmn.w r9, #65536 ; 0x10000\n",
- " 13a: f118 1f01 cmn.w r8, #65537 ; 0x10001\n",
- " 13e: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
- " 142: 45e1 cmp r9, ip\n",
- " 144: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
- " 148: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
- " 14c: 45e0 cmp r8, ip\n",
- nullptr
-};
-
const char* const VixlJniHelpersResults[] = {
" 0: e92d 4de0 stmdb sp!, {r5, r6, r7, r8, sl, fp, lr}\n",
" 4: ed2d 8a10 vpush {s16-s31}\n",
@@ -5720,55 +261,6 @@
std::map<std::string, const char* const*> test_results;
void setup_results() {
- test_results["SimpleMov"] = SimpleMovResults;
- test_results["SimpleMov32"] = SimpleMov32Results;
- test_results["SimpleMovAdd"] = SimpleMovAddResults;
- test_results["DataProcessingRegister"] = DataProcessingRegisterResults;
- test_results["DataProcessingImmediate"] = DataProcessingImmediateResults;
- test_results["DataProcessingModifiedImmediate"] = DataProcessingModifiedImmediateResults;
- test_results["DataProcessingModifiedImmediates"] = DataProcessingModifiedImmediatesResults;
- test_results["DataProcessingShiftedRegister"] = DataProcessingShiftedRegisterResults;
- test_results["ShiftImmediate"] = ShiftImmediateResults;
- test_results["BasicLoad"] = BasicLoadResults;
- test_results["BasicStore"] = BasicStoreResults;
- test_results["ComplexLoad"] = ComplexLoadResults;
- test_results["ComplexStore"] = ComplexStoreResults;
- test_results["NegativeLoadStore"] = NegativeLoadStoreResults;
- test_results["SimpleLoadStoreDual"] = SimpleLoadStoreDualResults;
- test_results["ComplexLoadStoreDual"] = ComplexLoadStoreDualResults;
- test_results["NegativeLoadStoreDual"] = NegativeLoadStoreDualResults;
- test_results["SimpleBranch"] = SimpleBranchResults;
- test_results["LongBranch"] = LongBranchResults;
- test_results["LoadMultiple"] = LoadMultipleResults;
- test_results["StoreMultiple"] = StoreMultipleResults;
- test_results["MovWMovT"] = MovWMovTResults;
- test_results["SpecialAddSub"] = SpecialAddSubResults;
- test_results["LoadFromOffset"] = LoadFromOffsetResults;
- test_results["StoreToOffset"] = StoreToOffsetResults;
- test_results["IfThen"] = IfThenResults;
- test_results["CbzCbnz"] = CbzCbnzResults;
- test_results["Multiply"] = MultiplyResults;
- test_results["Divide"] = DivideResults;
- test_results["VMov"] = VMovResults;
- test_results["BasicFloatingPoint"] = BasicFloatingPointResults;
- test_results["FloatingPointConversions"] = FloatingPointConversionsResults;
- test_results["FloatingPointComparisons"] = FloatingPointComparisonsResults;
- test_results["Calls"] = CallsResults;
- test_results["Breakpoint"] = BreakpointResults;
- test_results["StrR1"] = StrR1Results;
- test_results["VPushPop"] = VPushPopResults;
- test_results["Max16BitBranch"] = Max16BitBranchResults;
- test_results["Branch32"] = Branch32Results;
- test_results["CompareAndBranchMax"] = CompareAndBranchMaxResults;
- test_results["CompareAndBranchRelocation16"] = CompareAndBranchRelocation16Results;
- test_results["CompareAndBranchRelocation32"] = CompareAndBranchRelocation32Results;
- test_results["MixedBranch32"] = MixedBranch32Results;
- test_results["Shifts"] = ShiftsResults;
- test_results["LoadStoreRegOffset"] = LoadStoreRegOffsetResults;
- test_results["LoadStoreLimits"] = LoadStoreLimitsResults;
- test_results["CompareAndBranch"] = CompareAndBranchResults;
- test_results["AddConstant"] = AddConstantResults;
- test_results["CmpConstant"] = CmpConstantResults;
test_results["VixlJniHelpers"] = VixlJniHelpersResults;
test_results["VixlStoreToOffset"] = VixlStoreToOffsetResults;
test_results["VixlLoadFromOffset"] = VixlLoadFromOffsetResults;
diff --git a/compiler/utils/label.h b/compiler/utils/label.h
index 4c6ae8e..85710d0 100644
--- a/compiler/utils/label.h
+++ b/compiler/utils/label.h
@@ -26,10 +26,6 @@
class AssemblerBuffer;
class AssemblerFixup;
-namespace arm {
- class ArmAssembler;
- class Thumb2Assembler;
-} // namespace arm
namespace arm64 {
class Arm64Assembler;
} // namespace arm64
@@ -116,8 +112,6 @@
CHECK(IsLinked());
}
- friend class arm::ArmAssembler;
- friend class arm::Thumb2Assembler;
friend class arm64::Arm64Assembler;
friend class mips::MipsAssembler;
friend class mips64::Mips64Assembler;