Refactor compiled_method.h .

Move LinkerPatch to compiler/linker/linker_patch.h
and into the `linker` namespace.
Move SrcMapElem to compiler/debug/src_map_elem.h .
Introduce compiled_method-inl.h to reduce the number
of `#include`s in compiled_method.h .

Test: m test-art-host-gtest
Test: testrunner.py --host
Change-Id: Id211cdf94a63ad265bf4709f1a5e06dffbe30f64
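
For readers unfamiliar with the `-inl.h` idiom mentioned above, here is a
rough, generic sketch of the split, using hypothetical names (`Widget` and
`CopyData` are made up and are not the real CompiledMethod API): template
and other heavyweight definitions move into the `-inl.h` header, so
translation units that only need the declarations stop pulling in the
definitions' transitive includes.

  // widget.h -- hypothetical example of the pattern, not ART's real header.
  // Declarations only; keeps its own #include list minimal.
  #include <vector>

  class Widget {
   public:
    // Declared here, defined in widget-inl.h, so this header does not
    // need <algorithm> or any other header the definition depends on.
    template <typename T>
    static std::vector<T> CopyData(const std::vector<T>& source);
  };

  // widget-inl.h -- hypothetical; in a real split it would start with
  // #include "widget.h". Only the few .cc files that call CopyData()
  // include this file.
  #include <algorithm>

  template <typename T>
  inline std::vector<T> Widget::CopyData(const std::vector<T>& source) {
    std::vector<T> copy(source.size());
    std::copy(source.begin(), source.end(), copy.begin());
    return copy;
  }
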
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 69c5119..cebdaa1 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -627,7 +627,7 @@
                                 vixl::aarch64::Register out,
                                 vixl::aarch64::Register base);
 
-  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
 
   void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
 
@@ -805,9 +805,9 @@
 
   void EmitJumpTables();
 
-  template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
   static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
-                                          ArenaVector<LinkerPatch>* linker_patches);
+                                          ArenaVector<linker::LinkerPatch>* linker_patches);
 
   // Labels for each block that will be compiled.
   // We use a deque so that the `vixl::aarch64::Label` objects do not move in memory.
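
Below is a minimal, self-contained sketch of the factory-template pattern that
EmitPcRelativeLinkerPatches relies on, matching the
`linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)`
signature shown in the hunk above. Every type and the factory name here are
simplified stand-ins for illustration, not the real ART declarations.

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Simplified stand-ins for the real ART types (illustration only).
  struct DexFile {};

  struct LinkerPatch {
    size_t literal_offset;
    const DexFile* target_dex_file;
    uint32_t pc_insn_offset;
    uint32_t target_index;
  };

  struct PcRelativePatchInfo {
    const DexFile* target_dex_file;
    uint32_t offset_or_index;
    size_t label_location;    // where the instruction to patch was emitted
    size_t pc_insn_location;  // the anchoring PC-relative instruction
  };

  // Hypothetical factory; every patch kind shares the signature from the
  // diff, so one template can turn recorded infos into patches.
  LinkerPatch RelativeStringPatch(size_t literal_offset,
                                  const DexFile* dex_file,
                                  uint32_t pc_insn_offset,
                                  uint32_t string_index) {
    return LinkerPatch{literal_offset, dex_file, pc_insn_offset, string_index};
  }

  template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const std::vector<PcRelativePatchInfo>& infos,
                                   std::vector<LinkerPatch>* linker_patches) {
    for (const PcRelativePatchInfo& info : infos) {
      linker_patches->push_back(Factory(info.label_location,
                                        info.target_dex_file,
                                        info.pc_insn_location,
                                        info.offset_or_index));
    }
  }

  // Usage:
  //   EmitPcRelativeLinkerPatches<RelativeStringPatch>(string_infos, &patches);
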