blob: 4f92eb005b1faa48c8e5595a5a14b6e3ef226571 [file] [log] [blame]
Alexey Frunze4dda3372015-06-01 18:31:49 -07001/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
18#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
19
20#include "code_generator.h"
21#include "dex/compiler_enums.h"
22#include "driver/compiler_options.h"
23#include "nodes.h"
24#include "parallel_move_resolver.h"
25#include "utils/mips64/assembler_mips64.h"
26
27namespace art {
28namespace mips64 {
29
// Use a local definition to prevent copying mistakes.
static constexpr size_t kMips64WordSize = kMips64PointerSize;


// InvokeDexCallingConvention registers

// Core (GPR) argument registers for calls into managed code.
// NOTE(review): the list starts at A1 — presumably A0 is reserved for the
// method pointer; confirm against the managed calling convention in the .cc.
static constexpr GpuRegister kParameterCoreRegisters[] =
    { A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

// FP argument registers for calls into managed code; starts at F13,
// mirroring the core list above which starts at A1.
static constexpr FpuRegister kParameterFpuRegisters[] =
    { F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);


// InvokeRuntimeCallingConvention registers

// Core argument registers for calls into runtime entry points (A0 included).
static constexpr GpuRegister kRuntimeParameterCoreRegisters[] =
    { A0, A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);

// FP argument registers for runtime calls (F12 included).
static constexpr FpuRegister kRuntimeParameterFpuRegisters[] =
    { F12, F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);


// Registers preserved across calls by this code generator.
static constexpr GpuRegister kCoreCalleeSaves[] =
    { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };  // TODO: review
static constexpr FpuRegister kFpuCalleeSaves[] =
    { F24, F25, F26, F27, F28, F29, F30, F31 };
62
63
64class CodeGeneratorMIPS64;
65
// Calling convention used when invoking managed (dex) code: wires the
// parameter register tables above into the generic CallingConvention helper.
class InvokeDexCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kMips64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
78
// Assigns locations (registers / stack slots) to the arguments and return
// value of a managed-code invoke; the overridden methods are defined out of
// line in the .cc file.
class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorMIPS64() {}
  virtual ~InvokeDexCallingConventionVisitorMIPS64() {}

  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  // NOTE(review): name lacks the trailing-underscore member convention
  // (`calling_convention_`); renaming would break the out-of-view .cc,
  // so it is only flagged here.
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS64);
};
93
// Calling convention used when invoking runtime entry points; uses the
// kRuntimeParameter* register tables (which include A0/F12).
class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kMips64PointerSize) {}

  // Location of the runtime call's return value for the given type;
  // defined out of line in the .cc file.
  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
108
// Resolves parallel moves (register shuffles at block boundaries / invokes)
// for MIPS64 using the swap-based strategy of ParallelMoveResolverWithSwap.
class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  // Swaps two stack slots; `double_slot` presumably selects a 64-bit
  // exchange — confirm against the .cc implementation.
  void Exchange(int index1, int index2, bool double_slot);

  Mips64Assembler* GetAssembler() const;

 private:
  // Back-pointer to the owning code generator (provides the assembler).
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS64);
};
128
// MIPS64 slow-path stub: pairs the generic SlowPathCode with the entry and
// exit labels that the fast path branches through.
class SlowPathCodeMIPS64 : public SlowPathCode {
 public:
  SlowPathCodeMIPS64() : entry_label_(), exit_label_() {}

  Label* GetEntryLabel() { return &entry_label_; }
  Label* GetExitLabel() { return &exit_label_; }

 private:
  Label entry_label_;  // Bound at the start of the slow-path code.
  Label exit_label_;   // Branched to when returning to the fast path.

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS64);
};
142
// First code-generation pass: visits every instruction and records the
// register/stack LocationSummary it needs, without emitting any code.
class LocationsBuilderMIPS64 : public HGraphVisitor {
 public:
  LocationsBuilderMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

// Declares one Visit##name override per concrete HInstruction.
#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  // Only concrete instructions are expected; anything else is a bug.
  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  // Shared handlers for families of instructions; defined in the .cc file.
  void HandleInvoke(HInvoke* invoke);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  InvokeDexCallingConventionVisitorMIPS64 parameter_visitor_;

  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
};
174
// Second code-generation pass: visits every instruction and emits MIPS64
// machine code through the assembler, using the locations computed by
// LocationsBuilderMIPS64.
class InstructionCodeGeneratorMIPS64 : public HGraphVisitor {
 public:
  InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);

// Declares one Visit##name override per concrete HInstruction.
#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  // Only concrete instructions are expected; anything else is a bug.
  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  Mips64Assembler* GetAssembler() const { return assembler_; }

 private:
  void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
  void GenerateMemoryBarrier(MemBarrierKind kind);
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void GenerateImplicitNullCheck(HNullCheck* instruction);
  void GenerateExplicitNullCheck(HNullCheck* instruction);
  void GenerateTestAndBranch(HInstruction* instruction,
                             Label* true_target,
                             Label* false_target,
                             Label* always_true_target);

  Mips64Assembler* const assembler_;
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS64);
};
217
218class CodeGeneratorMIPS64 : public CodeGenerator {
219 public:
220 CodeGeneratorMIPS64(HGraph* graph,
221 const Mips64InstructionSetFeatures& isa_features,
222 const CompilerOptions& compiler_options);
223 virtual ~CodeGeneratorMIPS64() {}
224
225 void GenerateFrameEntry() OVERRIDE;
226 void GenerateFrameExit() OVERRIDE;
227
228 void Bind(HBasicBlock* block) OVERRIDE;
229
230 void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
231
232 size_t GetWordSize() const OVERRIDE { return kMips64WordSize; }
233
234 size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMips64WordSize; }
235
236 uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
237 return GetLabelOf(block)->Position();
238 }
239
240 HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
241 HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
242 Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
Alexandre Rameseb7b7392015-06-19 14:47:01 +0100243 const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
Alexey Frunze4dda3372015-06-01 18:31:49 -0700244
245 void MarkGCCard(GpuRegister object, GpuRegister value);
246
247 // Register allocation.
248
249 void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
250 // AllocateFreeRegister() is only used when allocating registers locally
251 // during CompileBaseline().
252 Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
253
254 Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
255
256 size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
257 size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
258 size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
259 size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
260
261 void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
262 void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
263
264 InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }
265
266 const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const {
267 return isa_features_;
268 }
269
270 Label* GetLabelOf(HBasicBlock* block) const {
271 return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
272 }
273
274 void Initialize() OVERRIDE {
275 block_labels_.SetSize(GetGraph()->GetBlocks().Size());
276 }
277
278 void Finalize(CodeAllocator* allocator) OVERRIDE;
279
280 // Code generation helpers.
281
282 void MoveLocation(Location destination, Location source, Primitive::Type type);
283
284 void SwapLocations(Location loc1, Location loc2, Primitive::Type type);
285
286 // Generate code to invoke a runtime entry point.
287 void InvokeRuntime(int32_t offset,
288 HInstruction* instruction,
289 uint32_t dex_pc,
290 SlowPathCode* slow_path);
291
292 ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
293
294 bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; }
295
296 void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
297
298 private:
299 // Labels for each block that will be compiled.
300 GrowableArray<Label> block_labels_;
301 Label frame_entry_label_;
302 LocationsBuilderMIPS64 location_builder_;
303 InstructionCodeGeneratorMIPS64 instruction_visitor_;
304 ParallelMoveResolverMIPS64 move_resolver_;
305 Mips64Assembler assembler_;
306 const Mips64InstructionSetFeatures& isa_features_;
307
308 DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
309};
310
311} // namespace mips64
312} // namespace art
313
314#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_