ARM64: Remove all uses of BlockPoolsScope.

BlockPoolsScope should not be used because it is a VIXL scope meant
for VIXL internal usage only. In the arm64 backend the intent was to
block pools between a particular instruction (Ldr, Str, Blr, etc.)
and a subsequent MaybeRecordImplicitNullCheck or RecordPcInfo call.
However, pools should be emitted at the opening of a scope if that is
required to satisfy branch/ldr ranges. BlockPoolsScope does not do
this, so the proper scopes are now used instead: ExactAssemblyScope
and EmissionCheckScope.
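
As an illustration (not part of this change), the implicit null check
pattern is expected to look roughly like the sketch below; masm, dst,
src, codegen and instruction are placeholder names, and the scope
classes come from VIXL:

  {
    // Reserve space for exactly one instruction so that no pool can
    // be emitted between the load and the recorded pc, keeping the
    // implicit null check pointing at the ldr itself.
    ExactAssemblyScope scope(masm,
                             vixl::aarch64::kInstructionSize,
                             CodeBufferCheckScope::kExactSize);
    __ ldr(dst, src);
    codegen->MaybeRecordImplicitNullCheck(instruction);
  }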

Test: test-art-host
Test: test-art-target

Bug: 34850123

Change-Id: I30365ad63c644cf9dd85d5a3c2118f9c57be9d20
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f6cb90a..5faf29a 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -43,6 +43,11 @@
 // Use a local definition to prevent copying mistakes.
 static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);
 
+// These constants are used as an approximate margin when emission of veneer and literal pools
+// must be blocked.
+static constexpr int kMaxMacroInstructionSizeInBytes = 15 * vixl::aarch64::kInstructionSize;
+static constexpr int kInvokeCodeMarginSizeInBytes = 6 * kMaxMacroInstructionSizeInBytes;
+
 static const vixl::aarch64::Register kParameterCoreRegisters[] = {
   vixl::aarch64::x1,
   vixl::aarch64::x2,
@@ -486,9 +491,11 @@
                    vixl::aarch64::CPURegister dst,
                    const vixl::aarch64::MemOperand& src,
                    bool needs_null_check);
-  void StoreRelease(Primitive::Type type,
+  void StoreRelease(HInstruction* instruction,
+                    Primitive::Type type,
                     vixl::aarch64::CPURegister src,
-                    const vixl::aarch64::MemOperand& dst);
+                    const vixl::aarch64::MemOperand& dst,
+                    bool needs_null_check);
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -502,8 +509,6 @@
                                            HInstruction* instruction,
                                            SlowPathCode* slow_path);
 
-  void GenerateInvokeRuntime(int32_t entry_point_offset);
-
   ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
 
   bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {