1/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_mips64.h"
18
19#include "entrypoints/quick/quick_entrypoints.h"
20#include "entrypoints/quick/quick_entrypoints_enum.h"
21#include "gc/accounting/card_table.h"
22#include "intrinsics.h"
23#include "intrinsics_mips64.h"
24#include "art_method.h"
25#include "code_generator_utils.h"
26#include "mirror/array-inl.h"
27#include "mirror/class-inl.h"
28#include "offsets.h"
29#include "thread.h"
30#include "utils/mips64/assembler_mips64.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35namespace mips64 {
36
37static constexpr int kCurrentMethodStackOffset = 0;
38static constexpr GpuRegister kMethodRegisterArgument = A0;
39
40// We need extra temporary/scratch registers (in addition to AT) in some cases.
41static constexpr FpuRegister FTMP = F8;
42
43Location Mips64ReturnLocation(Primitive::Type return_type) {
44 switch (return_type) {
45 case Primitive::kPrimBoolean:
46 case Primitive::kPrimByte:
47 case Primitive::kPrimChar:
48 case Primitive::kPrimShort:
49 case Primitive::kPrimInt:
50 case Primitive::kPrimNot:
51 case Primitive::kPrimLong:
52 return Location::RegisterLocation(V0);
53
54 case Primitive::kPrimFloat:
55 case Primitive::kPrimDouble:
56 return Location::FpuRegisterLocation(F0);
57
58 case Primitive::kPrimVoid:
59 return Location();
60 }
61 UNREACHABLE();
62}
63
64Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
65 return Mips64ReturnLocation(type);
66}
67
68Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
69 return Location::RegisterLocation(kMethodRegisterArgument);
70}
71
72Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
73 Location next_location;
74 if (type == Primitive::kPrimVoid) {
75 LOG(FATAL) << "Unexpected parameter type " << type;
76 }
77
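 // Note (added, assumed): in the MIPS64 N64 calling convention the GPR and FPR
 // argument slots are assigned positionally, which is why gp_index_ and
 // float_index_ are advanced together below.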
78 if (Primitive::IsFloatingPointType(type) &&
79 (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
80 next_location = Location::FpuRegisterLocation(
81 calling_convention.GetFpuRegisterAt(float_index_++));
82 gp_index_++;
83 } else if (!Primitive::IsFloatingPointType(type) &&
84 (gp_index_ < calling_convention.GetNumberOfRegisters())) {
85 next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
86 float_index_++;
87 } else {
88 size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
89 next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
90 : Location::StackSlot(stack_offset);
91 }
92
93 // Space on the stack is reserved for all arguments.
94 stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
95
96 // TODO: review
97
98 // TODO: shouldn't we use a whole machine word per argument on the stack?
99 // Implicit 4-byte method pointer (and such) will cause misalignment.
100
101 return next_location;
102}
103
104Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
105 return Mips64ReturnLocation(type);
106}
107
108#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
109#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
110
111class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
112 public:
113 explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}
114
115 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
116 LocationSummary* locations = instruction_->GetLocations();
117 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
118 __ Bind(GetEntryLabel());
119 if (instruction_->CanThrowIntoCatchBlock()) {
120 // Live registers will be restored in the catch block if caught.
121 SaveLiveRegisters(codegen, instruction_->GetLocations());
122 }
123 // We're moving two locations to locations that could overlap, so we need a parallel
124 // move resolver.
125 InvokeRuntimeCallingConvention calling_convention;
126 codegen->EmitParallelMoves(locations->InAt(0),
127 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
128 Primitive::kPrimInt,
129 locations->InAt(1),
130 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
131 Primitive::kPrimInt);
132 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
133 instruction_,
134 instruction_->GetDexPc(),
135 this);
136 CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
137 }
138
139 bool IsFatal() const OVERRIDE { return true; }
140
141 const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
142
143 private:
144 HBoundsCheck* const instruction_;
145
146 DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
147};
148
149class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
150 public:
151 explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}
152
153 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
154 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
155 __ Bind(GetEntryLabel());
156 if (instruction_->CanThrowIntoCatchBlock()) {
157 // Live registers will be restored in the catch block if caught.
158 SaveLiveRegisters(codegen, instruction_->GetLocations());
159 }
160 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
161 instruction_,
162 instruction_->GetDexPc(),
163 this);
164 CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
165 }
166
167 bool IsFatal() const OVERRIDE { return true; }
168
169 const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
170
171 private:
172 HDivZeroCheck* const instruction_;
173 DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
174};
175
176class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
177 public:
178 LoadClassSlowPathMIPS64(HLoadClass* cls,
179 HInstruction* at,
180 uint32_t dex_pc,
181 bool do_clinit)
182 : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
183 DCHECK(at->IsLoadClass() || at->IsClinitCheck());
184 }
185
186 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
187 LocationSummary* locations = at_->GetLocations();
188 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
189
190 __ Bind(GetEntryLabel());
191 SaveLiveRegisters(codegen, locations);
192
193 InvokeRuntimeCallingConvention calling_convention;
194 __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
195 int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
196 : QUICK_ENTRY_POINT(pInitializeType);
197 mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
198 if (do_clinit_) {
199 CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
200 } else {
201 CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
202 }
203
204 // Move the class to the desired location.
205 Location out = locations->Out();
206 if (out.IsValid()) {
207 DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
208 Primitive::Type type = at_->GetType();
209 mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
210 }
211
212 RestoreLiveRegisters(codegen, locations);
213 __ B(GetExitLabel());
214 }
215
216 const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }
217
218 private:
219 // The class this slow path will load.
220 HLoadClass* const cls_;
221
222 // The instruction where this slow path is happening.
223 // (Might be the load class or an initialization check).
224 HInstruction* const at_;
225
226 // The dex PC of `at_`.
227 const uint32_t dex_pc_;
228
229 // Whether to initialize the class.
230 const bool do_clinit_;
231
232 DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
233};
234
235class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
236 public:
237 explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}
238
239 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
240 LocationSummary* locations = instruction_->GetLocations();
241 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
242 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
243
244 __ Bind(GetEntryLabel());
245 SaveLiveRegisters(codegen, locations);
246
247 InvokeRuntimeCallingConvention calling_convention;
248 __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
249 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
250 instruction_,
251 instruction_->GetDexPc(),
252 this);
253 CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
254 Primitive::Type type = instruction_->GetType();
255 mips64_codegen->MoveLocation(locations->Out(),
256 calling_convention.GetReturnLocation(type),
257 type);
258
259 RestoreLiveRegisters(codegen, locations);
260 __ B(GetExitLabel());
261 }
262
263 const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
264
265 private:
266 HLoadString* const instruction_;
267
268 DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
269};
270
271class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
272 public:
273 explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}
274
275 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
276 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
277 __ Bind(GetEntryLabel());
278 if (instruction_->CanThrowIntoCatchBlock()) {
279 // Live registers will be restored in the catch block if caught.
280 SaveLiveRegisters(codegen, instruction_->GetLocations());
281 }
282 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
283 instruction_,
284 instruction_->GetDexPc(),
285 this);
286 CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
287 }
288
289 bool IsFatal() const OVERRIDE { return true; }
290
291 const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
292
293 private:
294 HNullCheck* const instruction_;
295
296 DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
297};
298
299class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
300 public:
301 SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
302 : instruction_(instruction), successor_(successor) {}
303
304 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
305 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
306 __ Bind(GetEntryLabel());
307 SaveLiveRegisters(codegen, instruction_->GetLocations());
308 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
309 instruction_,
310 instruction_->GetDexPc(),
311 this);
312 CheckEntrypointTypes<kQuickTestSuspend, void, void>();
313 RestoreLiveRegisters(codegen, instruction_->GetLocations());
314 if (successor_ == nullptr) {
315 __ B(GetReturnLabel());
316 } else {
317 __ B(mips64_codegen->GetLabelOf(successor_));
318 }
319 }
320
321 Label* GetReturnLabel() {
322 DCHECK(successor_ == nullptr);
323 return &return_label_;
324 }
325
326 const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
327
328 private:
329 HSuspendCheck* const instruction_;
330 // If not null, the block to branch to after the suspend check.
331 HBasicBlock* const successor_;
332
333 // If `successor_` is null, the label to branch to after the suspend check.
334 Label return_label_;
335
336 DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
337};
338
339class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
340 public:
341 explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}
342
343 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
344 LocationSummary* locations = instruction_->GetLocations();
345 Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
346 : locations->Out();
347 uint32_t dex_pc = instruction_->GetDexPc();
348 DCHECK(instruction_->IsCheckCast()
349 || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
350 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
351
352 __ Bind(GetEntryLabel());
353 SaveLiveRegisters(codegen, locations);
354
355 // We're moving two locations to locations that could overlap, so we need a parallel
356 // move resolver.
357 InvokeRuntimeCallingConvention calling_convention;
358 codegen->EmitParallelMoves(locations->InAt(1),
359 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
360 Primitive::kPrimNot,
361 object_class,
362 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
363 Primitive::kPrimNot);
364
365 if (instruction_->IsInstanceOf()) {
366 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
367 instruction_,
368 dex_pc,
369 this);
370 Primitive::Type ret_type = instruction_->GetType();
371 Location ret_loc = calling_convention.GetReturnLocation(ret_type);
372 mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
373 CheckEntrypointTypes<kQuickInstanceofNonTrivial,
374 uint32_t,
375 const mirror::Class*,
376 const mirror::Class*>();
377 } else {
378 DCHECK(instruction_->IsCheckCast());
379 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
380 CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
381 }
382
383 RestoreLiveRegisters(codegen, locations);
384 __ B(GetExitLabel());
385 }
386
387 const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
388
389 private:
390 HInstruction* const instruction_;
391
392 DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
393};
394
395class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
396 public:
397 explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
398 : instruction_(instruction) {}
399
400 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
401 __ Bind(GetEntryLabel());
402 SaveLiveRegisters(codegen, instruction_->GetLocations());
403 DCHECK(instruction_->IsDeoptimize());
404 HDeoptimize* deoptimize = instruction_->AsDeoptimize();
405 uint32_t dex_pc = deoptimize->GetDexPc();
406 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
407 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
408 }
409
410 const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
411
412 private:
413 HInstruction* const instruction_;
414 DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
415};
416
417CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
418 const Mips64InstructionSetFeatures& isa_features,
419 const CompilerOptions& compiler_options,
420 OptimizingCompilerStats* stats)
421 : CodeGenerator(graph,
422 kNumberOfGpuRegisters,
423 kNumberOfFpuRegisters,
424 0, // kNumberOfRegisterPairs
425 ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
426 arraysize(kCoreCalleeSaves)),
427 ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
428 arraysize(kFpuCalleeSaves)),
429 compiler_options,
430 stats),
431 block_labels_(nullptr),
432 location_builder_(graph, this),
433 instruction_visitor_(graph, this),
434 move_resolver_(graph->GetArena(), this),
435 isa_features_(isa_features) {
436 // Save RA (containing the return address) to mimic Quick.
437 AddAllocatedRegister(Location::RegisterLocation(RA));
438}
439
440#undef __
441#define __ down_cast<Mips64Assembler*>(GetAssembler())->
442#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
443
444void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
445 CodeGenerator::Finalize(allocator);
446}
447
448Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
449 return codegen_->GetAssembler();
450}
451
452void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
453 DCHECK_LT(index, moves_.size());
454 MoveOperands* move = moves_[index];
455 codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
456}
457
458void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
459 DCHECK_LT(index, moves_.size());
460 MoveOperands* move = moves_[index];
461 codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
462}
463
464void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
465 // Pop reg
466 __ Ld(GpuRegister(reg), SP, 0);
467 __ DecreaseFrameSize(kMips64WordSize);
468}
469
470void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
471 // Push reg
472 __ IncreaseFrameSize(kMips64WordSize);
473 __ Sd(GpuRegister(reg), SP, 0);
474}
475
476void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
477 LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
478 StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
479 // Allocate a scratch register other than TMP, if available.
480 // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
481 // automatically unspilled when the scratch scope object is destroyed).
482 ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
483 // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
484 int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
485 __ LoadFromOffset(load_type,
486 GpuRegister(ensure_scratch.GetRegister()),
487 SP,
488 index1 + stack_offset);
489 __ LoadFromOffset(load_type,
490 TMP,
491 SP,
492 index2 + stack_offset);
493 __ StoreToOffset(store_type,
494 GpuRegister(ensure_scratch.GetRegister()),
495 SP,
496 index2 + stack_offset);
497 __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
498}
499
500static dwarf::Reg DWARFReg(GpuRegister reg) {
501 return dwarf::Reg::Mips64Core(static_cast<int>(reg));
502}
503
504// TODO: mapping of floating-point registers to DWARF
505
506void CodeGeneratorMIPS64::GenerateFrameEntry() {
507 __ Bind(&frame_entry_label_);
508
509 bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
510
511 if (do_overflow_check) {
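 // Descriptive note (added): probe one word at the lowest address this frame
 // may touch; if the guard page is hit, the load into ZERO faults, making the
 // stack overflow check implicit.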
512 __ LoadFromOffset(kLoadWord,
513 ZERO,
514 SP,
515 -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
516 RecordPcInfo(nullptr, 0);
517 }
518
519 // TODO: anything related to T9/GP/GOT/PIC/.so's?
520
521 if (HasEmptyFrame()) {
522 return;
523 }
524
525 // Make sure the frame size isn't unreasonably large. Per the various APIs
526 // it looks like it should always be less than 2GB in size, which allows
527 // us to use 32-bit signed offsets from the stack pointer.
528 if (GetFrameSize() > 0x7FFFFFFF)
529 LOG(FATAL) << "Stack frame larger than 2GB";
530
531 // Spill callee-saved registers.
532 // Note that their cumulative size is small and they can be indexed using
533 // 16-bit offsets.
534
535 // TODO: increment/decrement SP in one step instead of two or remove this comment.
536
537 uint32_t ofs = FrameEntrySpillSize();
538 __ IncreaseFrameSize(ofs);
539
540 for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
541 GpuRegister reg = kCoreCalleeSaves[i];
542 if (allocated_registers_.ContainsCoreRegister(reg)) {
543 ofs -= kMips64WordSize;
544 __ Sd(reg, SP, ofs);
545 __ cfi().RelOffset(DWARFReg(reg), ofs);
546 }
547 }
548
549 for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
550 FpuRegister reg = kFpuCalleeSaves[i];
551 if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
552 ofs -= kMips64WordSize;
553 __ Sdc1(reg, SP, ofs);
554 // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
555 }
556 }
557
558 // Allocate the rest of the frame and store the current method pointer
559 // at its end.
560
561 __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
562
563 static_assert(IsInt<16>(kCurrentMethodStackOffset),
564 "kCurrentMethodStackOffset must fit into int16_t");
565 __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
566}
567
568void CodeGeneratorMIPS64::GenerateFrameExit() {
569 __ cfi().RememberState();
570
571 // TODO: anything related to T9/GP/GOT/PIC/.so's?
572
573 if (!HasEmptyFrame()) {
574 // Deallocate the rest of the frame.
575
576 __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
577
578 // Restore callee-saved registers.
579 // Note that their cumulative size is small and they can be indexed using
580 // 16-bit offsets.
581
582 // TODO: increment/decrement SP in one step instead of two or remove this comment.
583
584 uint32_t ofs = 0;
585
586 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
587 FpuRegister reg = kFpuCalleeSaves[i];
588 if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
589 __ Ldc1(reg, SP, ofs);
590 ofs += kMips64WordSize;
591 // TODO: __ cfi().Restore(DWARFReg(reg));
592 }
593 }
594
595 for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
596 GpuRegister reg = kCoreCalleeSaves[i];
597 if (allocated_registers_.ContainsCoreRegister(reg)) {
598 __ Ld(reg, SP, ofs);
599 ofs += kMips64WordSize;
600 __ cfi().Restore(DWARFReg(reg));
601 }
602 }
603
604 DCHECK_EQ(ofs, FrameEntrySpillSize());
605 __ DecreaseFrameSize(ofs);
606 }
607
608 __ Jr(RA);
609
610 __ cfi().RestoreState();
611 __ cfi().DefCFAOffset(GetFrameSize());
612}
613
614void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
615 __ Bind(GetLabelOf(block));
616}
617
618void CodeGeneratorMIPS64::MoveLocation(Location destination,
619 Location source,
620 Primitive::Type dst_type) {
621 if (source.Equals(destination)) {
622 return;
623 }
624
625 // A valid move can always be inferred from the destination and source
626 // locations. When moving from and to a register, the argument type can be
627 // used to generate 32bit instead of 64bit moves.
628 bool unspecified_type = (dst_type == Primitive::kPrimVoid);
629 DCHECK_EQ(unspecified_type, false);
630
631 if (destination.IsRegister() || destination.IsFpuRegister()) {
632 if (unspecified_type) {
633 HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
634 if (source.IsStackSlot() ||
635 (src_cst != nullptr && (src_cst->IsIntConstant()
636 || src_cst->IsFloatConstant()
637 || src_cst->IsNullConstant()))) {
638 // For stack slots and 32bit constants, a 64bit type is appropriate.
639 dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
640 } else {
641 // If the source is a double stack slot or a 64bit constant, a 64bit
642 // type is appropriate. Else the source is a register, and since the
643 // type has not been specified, we chose a 64bit type to force a 64bit
644 // move.
645 dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
646 }
647 }
648 DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
649 (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
650 if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
651 // Move to GPR/FPR from stack
652 LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
653 if (Primitive::IsFloatingPointType(dst_type)) {
654 __ LoadFpuFromOffset(load_type,
655 destination.AsFpuRegister<FpuRegister>(),
656 SP,
657 source.GetStackIndex());
658 } else {
659 // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
660 __ LoadFromOffset(load_type,
661 destination.AsRegister<GpuRegister>(),
662 SP,
663 source.GetStackIndex());
664 }
665 } else if (source.IsConstant()) {
666 // Move to GPR/FPR from constant
667 GpuRegister gpr = AT;
668 if (!Primitive::IsFloatingPointType(dst_type)) {
669 gpr = destination.AsRegister<GpuRegister>();
670 }
671 if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
672 __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
673 } else {
674 __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
675 }
676 if (dst_type == Primitive::kPrimFloat) {
677 __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
678 } else if (dst_type == Primitive::kPrimDouble) {
679 __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
680 }
681 } else if (source.IsRegister()) {
682 if (destination.IsRegister()) {
683 // Move to GPR from GPR
684 __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
685 } else {
686 DCHECK(destination.IsFpuRegister());
687 if (Primitive::Is64BitType(dst_type)) {
688 __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
689 } else {
690 __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
691 }
692 }
693 } else if (source.IsFpuRegister()) {
694 if (destination.IsFpuRegister()) {
695 // Move to FPR from FPR
696 if (dst_type == Primitive::kPrimFloat) {
697 __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
698 } else {
699 DCHECK_EQ(dst_type, Primitive::kPrimDouble);
700 __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
701 }
702 } else {
703 DCHECK(destination.IsRegister());
704 if (Primitive::Is64BitType(dst_type)) {
705 __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
706 } else {
707 __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
708 }
709 }
710 }
711 } else { // The destination is not a register. It must be a stack slot.
712 DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
713 if (source.IsRegister() || source.IsFpuRegister()) {
714 if (unspecified_type) {
715 if (source.IsRegister()) {
716 dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
717 } else {
718 dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
719 }
720 }
721 DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
722 (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
723 // Move to stack from GPR/FPR
724 StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
725 if (source.IsRegister()) {
726 __ StoreToOffset(store_type,
727 source.AsRegister<GpuRegister>(),
728 SP,
729 destination.GetStackIndex());
730 } else {
731 __ StoreFpuToOffset(store_type,
732 source.AsFpuRegister<FpuRegister>(),
733 SP,
734 destination.GetStackIndex());
735 }
736 } else if (source.IsConstant()) {
737 // Move to stack from constant
738 HConstant* src_cst = source.GetConstant();
739 StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
740 if (destination.IsStackSlot()) {
741 __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
742 } else {
743 __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
744 }
745 __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
746 } else {
747 DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
748 DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
749 // Move to stack from stack
750 if (destination.IsStackSlot()) {
751 __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
752 __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
753 } else {
754 __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
755 __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
756 }
757 }
758 }
759}
760
761void CodeGeneratorMIPS64::SwapLocations(Location loc1,
762 Location loc2,
763 Primitive::Type type ATTRIBUTE_UNUSED) {
764 DCHECK(!loc1.IsConstant());
765 DCHECK(!loc2.IsConstant());
766
767 if (loc1.Equals(loc2)) {
768 return;
769 }
770
771 bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
772 bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
773 bool is_fp_reg1 = loc1.IsFpuRegister();
774 bool is_fp_reg2 = loc2.IsFpuRegister();
775
776 if (loc2.IsRegister() && loc1.IsRegister()) {
777 // Swap 2 GPRs
778 GpuRegister r1 = loc1.AsRegister<GpuRegister>();
779 GpuRegister r2 = loc2.AsRegister<GpuRegister>();
780 __ Move(TMP, r2);
781 __ Move(r2, r1);
782 __ Move(r1, TMP);
783 } else if (is_fp_reg2 && is_fp_reg1) {
784 // Swap 2 FPRs
785 FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
786 FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
787 // TODO: Can MOV.S/MOV.D be used here to save one instruction?
788 // Need to distinguish float from double, right?
789 __ Dmfc1(TMP, r2);
790 __ Dmfc1(AT, r1);
791 __ Dmtc1(TMP, r1);
792 __ Dmtc1(AT, r2);
793 } else if (is_slot1 != is_slot2) {
794 // Swap GPR/FPR and stack slot
795 Location reg_loc = is_slot1 ? loc2 : loc1;
796 Location mem_loc = is_slot1 ? loc1 : loc2;
797 LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
798 StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
799 // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
800 __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
801 if (reg_loc.IsFpuRegister()) {
802 __ StoreFpuToOffset(store_type,
803 reg_loc.AsFpuRegister<FpuRegister>(),
804 SP,
805 mem_loc.GetStackIndex());
806 // TODO: review this MTC1/DMTC1 move
807 if (mem_loc.IsStackSlot()) {
808 __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
809 } else {
810 DCHECK(mem_loc.IsDoubleStackSlot());
811 __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
812 }
813 } else {
814 __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
815 __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
816 }
817 } else if (is_slot1 && is_slot2) {
818 move_resolver_.Exchange(loc1.GetStackIndex(),
819 loc2.GetStackIndex(),
820 loc1.IsDoubleStackSlot());
821 } else {
822 LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
823 }
824}
825
826void CodeGeneratorMIPS64::Move(HInstruction* instruction,
827 Location location,
828 HInstruction* move_for) {
829 LocationSummary* locations = instruction->GetLocations();
830 Primitive::Type type = instruction->GetType();
831 DCHECK_NE(type, Primitive::kPrimVoid);
832
833 if (instruction->IsCurrentMethod()) {
834 MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
835 } else if (locations != nullptr && locations->Out().Equals(location)) {
836 return;
837 } else if (instruction->IsIntConstant()
838 || instruction->IsLongConstant()
839 || instruction->IsNullConstant()) {
840 if (location.IsRegister()) {
841 // Move to GPR from constant
842 GpuRegister dst = location.AsRegister<GpuRegister>();
843 if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
844 __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
845 } else {
846 __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
847 }
848 } else {
849 DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
850 // Move to stack from constant
851 if (location.IsStackSlot()) {
852 __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
853 __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
854 } else {
855 __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
856 __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
857 }
858 }
859 } else if (instruction->IsTemporary()) {
860 Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
861 MoveLocation(location, temp_location, type);
862 } else if (instruction->IsLoadLocal()) {
863 uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
864 if (Primitive::Is64BitType(type)) {
865 MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
866 } else {
867 MoveLocation(location, Location::StackSlot(stack_slot), type);
868 }
869 } else {
870 DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
871 MoveLocation(location, locations->Out(), type);
872 }
873}
874
875void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
876 DCHECK(location.IsRegister());
877 __ LoadConst32(location.AsRegister<GpuRegister>(), value);
878}
879
880void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
881 if (location.IsRegister()) {
882 locations->AddTemp(location);
883 } else {
884 UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
885 }
886}
887
888Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
889 Primitive::Type type = load->GetType();
890
891 switch (type) {
892 case Primitive::kPrimNot:
893 case Primitive::kPrimInt:
894 case Primitive::kPrimFloat:
895 return Location::StackSlot(GetStackSlot(load->GetLocal()));
896
897 case Primitive::kPrimLong:
898 case Primitive::kPrimDouble:
899 return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
900
901 case Primitive::kPrimBoolean:
902 case Primitive::kPrimByte:
903 case Primitive::kPrimChar:
904 case Primitive::kPrimShort:
905 case Primitive::kPrimVoid:
906 LOG(FATAL) << "Unexpected type " << type;
907 }
908
909 LOG(FATAL) << "Unreachable";
910 return Location::NoLocation();
911}
912
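// Descriptive note (added): write barrier for reference stores. When `value`
// is not null, the card for `object` is marked by storing a byte at
// card_table_base + (object >> kCardShift); ART is assumed to bias the table
// so the low byte of the base equals the dirty-card value.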
913void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
914 Label done;
915 GpuRegister card = AT;
916 GpuRegister temp = TMP;
917 __ Beqzc(value, &done);
918 __ LoadFromOffset(kLoadDoubleword,
919 card,
920 TR,
921 Thread::CardTableOffset<kMips64WordSize>().Int32Value());
922 __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
923 __ Daddu(temp, card, temp);
924 __ Sb(card, temp, 0);
925 __ Bind(&done);
926}
927
928void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
929 // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
930 blocked_core_registers_[ZERO] = true;
931 blocked_core_registers_[K0] = true;
932 blocked_core_registers_[K1] = true;
933 blocked_core_registers_[GP] = true;
934 blocked_core_registers_[SP] = true;
935 blocked_core_registers_[RA] = true;
936
937 // AT and TMP(T8) are used as temporary/scratch registers
938 // (similar to how AT is used by MIPS assemblers).
939 blocked_core_registers_[AT] = true;
940 blocked_core_registers_[TMP] = true;
941 blocked_fpu_registers_[FTMP] = true;
942
943 // Reserve suspend and thread registers.
944 blocked_core_registers_[S0] = true;
945 blocked_core_registers_[TR] = true;
946
947 // Reserve T9 for function calls
948 blocked_core_registers_[T9] = true;
949
950 // TODO: review; anything else?
951
952 // TODO: make these two for's conditional on is_baseline once
953 // all the issues with register saving/restoring are sorted out.
954 for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
955 blocked_core_registers_[kCoreCalleeSaves[i]] = true;
956 }
957
958 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
959 blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
960 }
961}
962
963Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
964 if (type == Primitive::kPrimVoid) {
965 LOG(FATAL) << "Unreachable type " << type;
966 }
967
968 if (Primitive::IsFloatingPointType(type)) {
969 size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
970 return Location::FpuRegisterLocation(reg);
971 } else {
972 size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
973 return Location::RegisterLocation(reg);
974 }
975}
976
977size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
978 __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
979 return kMips64WordSize;
980}
981
982size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
983 __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
984 return kMips64WordSize;
985}
986
987size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
988 __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
989 return kMips64WordSize;
990}
991
992size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
993 __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
994 return kMips64WordSize;
995}
996
997void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
998 stream << GpuRegister(reg);
999}
1000
1001void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
1002 stream << FpuRegister(reg);
1003}
1004
1005void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
1006 HInstruction* instruction,
1007 uint32_t dex_pc,
1008 SlowPathCode* slow_path) {
1009 InvokeRuntime(GetThreadOffset<kMips64WordSize>(entrypoint).Int32Value(),
1010 instruction,
1011 dex_pc,
1012 slow_path);
1013}
1014
1015void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
1016 HInstruction* instruction,
1017 uint32_t dex_pc,
1018 SlowPathCode* slow_path) {
1019 ValidateInvokeRuntime(instruction, slow_path);
1020 // TODO: anything related to T9/GP/GOT/PIC/.so's?
1021 __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
1022 __ Jalr(T9);
1023 RecordPcInfo(instruction, dex_pc, slow_path);
1024}
1025
1026void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
1027 GpuRegister class_reg) {
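 // Descriptive note (added): branch to the slow path while the class status is
 // still below kStatusInitialized; this relies on the ordering of the
 // mirror::Class status values.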
1028 __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
1029 __ LoadConst32(AT, mirror::Class::kStatusInitialized);
1030 __ Bltc(TMP, AT, slow_path->GetEntryLabel());
1031 // TODO: barrier needed?
1032 __ Bind(slow_path->GetExitLabel());
1033}
1034
1035void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
1036 __ Sync(0); // only stype 0 is supported
1037}
1038
1039void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
1040 HBasicBlock* successor) {
1041 SuspendCheckSlowPathMIPS64* slow_path =
1042 new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
1043 codegen_->AddSlowPath(slow_path);
1044
1045 __ LoadFromOffset(kLoadUnsignedHalfword,
1046 TMP,
1047 TR,
1048 Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
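 // Descriptive note (added): a non-zero thread-flags value means a suspend or
 // checkpoint request is pending, so control falls into the slow path below.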
1049 if (successor == nullptr) {
1050 __ Bnezc(TMP, slow_path->GetEntryLabel());
1051 __ Bind(slow_path->GetReturnLabel());
1052 } else {
1053 __ Beqzc(TMP, codegen_->GetLabelOf(successor));
1054 __ B(slow_path->GetEntryLabel());
1055 // slow_path will return to GetLabelOf(successor).
1056 }
1057}
1058
1059InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
1060 CodeGeneratorMIPS64* codegen)
1061 : HGraphVisitor(graph),
1062 assembler_(codegen->GetAssembler()),
1063 codegen_(codegen) {}
1064
1065void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1066 DCHECK_EQ(instruction->InputCount(), 2U);
1067 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1068 Primitive::Type type = instruction->GetResultType();
1069 switch (type) {
1070 case Primitive::kPrimInt:
1071 case Primitive::kPrimLong: {
1072 locations->SetInAt(0, Location::RequiresRegister());
1073 HInstruction* right = instruction->InputAt(1);
1074 bool can_use_imm = false;
1075 if (right->IsConstant()) {
1076 int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
1077 if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
1078 can_use_imm = IsUint<16>(imm);
1079 } else if (instruction->IsAdd()) {
1080 can_use_imm = IsInt<16>(imm);
1081 } else {
1082 DCHECK(instruction->IsSub());
1083 can_use_imm = IsInt<16>(-imm);
1084 }
1085 }
1086 if (can_use_imm)
1087 locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
1088 else
1089 locations->SetInAt(1, Location::RequiresRegister());
1090 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1091 }
1092 break;
1093
1094 case Primitive::kPrimFloat:
1095 case Primitive::kPrimDouble:
1096 locations->SetInAt(0, Location::RequiresFpuRegister());
1097 locations->SetInAt(1, Location::RequiresFpuRegister());
1098 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1099 break;
1100
1101 default:
1102 LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
1103 }
1104}
1105
1106void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1107 Primitive::Type type = instruction->GetType();
1108 LocationSummary* locations = instruction->GetLocations();
1109
1110 switch (type) {
1111 case Primitive::kPrimInt:
1112 case Primitive::kPrimLong: {
1113 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1114 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1115 Location rhs_location = locations->InAt(1);
1116
1117 GpuRegister rhs_reg = ZERO;
1118 int64_t rhs_imm = 0;
1119 bool use_imm = rhs_location.IsConstant();
1120 if (use_imm) {
1121 rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1122 } else {
1123 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1124 }
1125
1126 if (instruction->IsAnd()) {
1127 if (use_imm)
1128 __ Andi(dst, lhs, rhs_imm);
1129 else
1130 __ And(dst, lhs, rhs_reg);
1131 } else if (instruction->IsOr()) {
1132 if (use_imm)
1133 __ Ori(dst, lhs, rhs_imm);
1134 else
1135 __ Or(dst, lhs, rhs_reg);
1136 } else if (instruction->IsXor()) {
1137 if (use_imm)
1138 __ Xori(dst, lhs, rhs_imm);
1139 else
1140 __ Xor(dst, lhs, rhs_reg);
1141 } else if (instruction->IsAdd()) {
1142 if (type == Primitive::kPrimInt) {
1143 if (use_imm)
1144 __ Addiu(dst, lhs, rhs_imm);
1145 else
1146 __ Addu(dst, lhs, rhs_reg);
1147 } else {
1148 if (use_imm)
1149 __ Daddiu(dst, lhs, rhs_imm);
1150 else
1151 __ Daddu(dst, lhs, rhs_reg);
1152 }
1153 } else {
1154 DCHECK(instruction->IsSub());
1155 if (type == Primitive::kPrimInt) {
1156 if (use_imm)
1157 __ Addiu(dst, lhs, -rhs_imm);
1158 else
1159 __ Subu(dst, lhs, rhs_reg);
1160 } else {
1161 if (use_imm)
1162 __ Daddiu(dst, lhs, -rhs_imm);
1163 else
1164 __ Dsubu(dst, lhs, rhs_reg);
1165 }
1166 }
1167 break;
1168 }
1169 case Primitive::kPrimFloat:
1170 case Primitive::kPrimDouble: {
1171 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1172 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1173 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1174 if (instruction->IsAdd()) {
1175 if (type == Primitive::kPrimFloat)
1176 __ AddS(dst, lhs, rhs);
1177 else
1178 __ AddD(dst, lhs, rhs);
1179 } else if (instruction->IsSub()) {
1180 if (type == Primitive::kPrimFloat)
1181 __ SubS(dst, lhs, rhs);
1182 else
1183 __ SubD(dst, lhs, rhs);
1184 } else {
1185 LOG(FATAL) << "Unexpected floating-point binary operation";
1186 }
1187 break;
1188 }
1189 default:
1190 LOG(FATAL) << "Unexpected binary operation type " << type;
1191 }
1192}
1193
1194void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
1195 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1196
1197 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1198 Primitive::Type type = instr->GetResultType();
1199 switch (type) {
1200 case Primitive::kPrimInt:
1201 case Primitive::kPrimLong: {
1202 locations->SetInAt(0, Location::RequiresRegister());
1203 locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1204 locations->SetOut(Location::RequiresRegister());
1205 break;
1206 }
1207 default:
1208 LOG(FATAL) << "Unexpected shift type " << type;
1209 }
1210}
1211
1212void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
1213 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1214 LocationSummary* locations = instr->GetLocations();
1215 Primitive::Type type = instr->GetType();
1216
1217 switch (type) {
1218 case Primitive::kPrimInt:
1219 case Primitive::kPrimLong: {
1220 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1221 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1222 Location rhs_location = locations->InAt(1);
1223
1224 GpuRegister rhs_reg = ZERO;
1225 int64_t rhs_imm = 0;
1226 bool use_imm = rhs_location.IsConstant();
1227 if (use_imm) {
1228 rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1229 } else {
1230 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1231 }
1232
1233 if (use_imm) {
1234 uint32_t shift_value = (type == Primitive::kPrimInt)
1235 ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
1236 : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
1237
1238 if (type == Primitive::kPrimInt) {
1239 if (instr->IsShl()) {
1240 __ Sll(dst, lhs, shift_value);
1241 } else if (instr->IsShr()) {
1242 __ Sra(dst, lhs, shift_value);
1243 } else {
1244 __ Srl(dst, lhs, shift_value);
1245 }
1246 } else {
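 // Descriptive note (added): 64-bit immediate shifts encode amounts 0-31 with
 // the plain forms; amounts 32-63 use the *32 instruction variants below with
 // the amount reduced by 32.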
1247 if (shift_value < 32) {
1248 if (instr->IsShl()) {
1249 __ Dsll(dst, lhs, shift_value);
1250 } else if (instr->IsShr()) {
1251 __ Dsra(dst, lhs, shift_value);
1252 } else {
1253 __ Dsrl(dst, lhs, shift_value);
1254 }
1255 } else {
1256 shift_value -= 32;
1257 if (instr->IsShl()) {
1258 __ Dsll32(dst, lhs, shift_value);
1259 } else if (instr->IsShr()) {
1260 __ Dsra32(dst, lhs, shift_value);
1261 } else {
1262 __ Dsrl32(dst, lhs, shift_value);
1263 }
1264 }
1265 }
1266 } else {
1267 if (type == Primitive::kPrimInt) {
1268 if (instr->IsShl()) {
1269 __ Sllv(dst, lhs, rhs_reg);
1270 } else if (instr->IsShr()) {
1271 __ Srav(dst, lhs, rhs_reg);
1272 } else {
1273 __ Srlv(dst, lhs, rhs_reg);
1274 }
1275 } else {
1276 if (instr->IsShl()) {
1277 __ Dsllv(dst, lhs, rhs_reg);
1278 } else if (instr->IsShr()) {
1279 __ Dsrav(dst, lhs, rhs_reg);
1280 } else {
1281 __ Dsrlv(dst, lhs, rhs_reg);
1282 }
1283 }
1284 }
1285 break;
1286 }
1287 default:
1288 LOG(FATAL) << "Unexpected shift operation type " << type;
1289 }
1290}
1291
1292void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
1293 HandleBinaryOp(instruction);
1294}
1295
1296void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
1297 HandleBinaryOp(instruction);
1298}
1299
1300void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
1301 HandleBinaryOp(instruction);
1302}
1303
1304void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
1305 HandleBinaryOp(instruction);
1306}
1307
1308void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
1309 LocationSummary* locations =
1310 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1311 locations->SetInAt(0, Location::RequiresRegister());
1312 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1313 if (Primitive::IsFloatingPointType(instruction->GetType())) {
1314 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1315 } else {
1316 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1317 }
1318}
1319
1320void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
1321 LocationSummary* locations = instruction->GetLocations();
1322 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1323 Location index = locations->InAt(1);
1324 Primitive::Type type = instruction->GetType();
1325
1326 switch (type) {
1327 case Primitive::kPrimBoolean: {
1328 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1329 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1330 if (index.IsConstant()) {
1331 size_t offset =
1332 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1333 __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
1334 } else {
1335 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1336 __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
1337 }
1338 break;
1339 }
1340
1341 case Primitive::kPrimByte: {
1342 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
1343 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1344 if (index.IsConstant()) {
1345 size_t offset =
1346 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1347 __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
1348 } else {
1349 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1350 __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
1351 }
1352 break;
1353 }
1354
1355 case Primitive::kPrimShort: {
1356 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
1357 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1358 if (index.IsConstant()) {
1359 size_t offset =
1360 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1361 __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
1362 } else {
1363 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1364 __ Daddu(TMP, obj, TMP);
1365 __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
1366 }
1367 break;
1368 }
1369
1370 case Primitive::kPrimChar: {
1371 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1372 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1373 if (index.IsConstant()) {
1374 size_t offset =
1375 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1376 __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
1377 } else {
1378 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1379 __ Daddu(TMP, obj, TMP);
1380 __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
1381 }
1382 break;
1383 }
1384
1385 case Primitive::kPrimInt:
1386 case Primitive::kPrimNot: {
1387 DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
1388 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1389 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1390 LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
1391 if (index.IsConstant()) {
1392 size_t offset =
1393 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1394 __ LoadFromOffset(load_type, out, obj, offset);
1395 } else {
1396 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1397 __ Daddu(TMP, obj, TMP);
1398 __ LoadFromOffset(load_type, out, TMP, data_offset);
1399 }
1400 break;
1401 }
1402
1403 case Primitive::kPrimLong: {
1404 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1405 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1406 if (index.IsConstant()) {
1407 size_t offset =
1408 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1409 __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
1410 } else {
1411 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1412 __ Daddu(TMP, obj, TMP);
1413 __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
1414 }
1415 break;
1416 }
1417
1418 case Primitive::kPrimFloat: {
1419 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1420 FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
1421 if (index.IsConstant()) {
1422 size_t offset =
1423 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1424 __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
1425 } else {
1426 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1427 __ Daddu(TMP, obj, TMP);
1428 __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
1429 }
1430 break;
1431 }
1432
1433 case Primitive::kPrimDouble: {
1434 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1435 FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
1436 if (index.IsConstant()) {
1437 size_t offset =
1438 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1439 __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
1440 } else {
1441 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1442 __ Daddu(TMP, obj, TMP);
1443 __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
1444 }
1445 break;
1446 }
1447
1448 case Primitive::kPrimVoid:
1449 LOG(FATAL) << "Unreachable type " << instruction->GetType();
1450 UNREACHABLE();
1451 }
1452 codegen_->MaybeRecordImplicitNullCheck(instruction);
1453}
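// In effect, both the constant-index and register-index paths above compute
//   element_address = obj + data_offset + (index << TIMES_n)
// with TIMES_n matching the element size; for a constant index the whole
// offset is folded into the memory operand, otherwise TMP holds the sum.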
1454
1455void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
1456 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1457 locations->SetInAt(0, Location::RequiresRegister());
1458 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1459}
1460
1461void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
1462 LocationSummary* locations = instruction->GetLocations();
1463 uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
1464 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1465 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1466 __ LoadFromOffset(kLoadWord, out, obj, offset);
1467 codegen_->MaybeRecordImplicitNullCheck(instruction);
1468}
1469
1470void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
David Brazdilbb3d5052015-09-21 18:39:16 +01001471 bool needs_runtime_call = instruction->NeedsTypeCheck();
Alexey Frunze4dda3372015-06-01 18:31:49 -07001472 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1473 instruction,
David Brazdilbb3d5052015-09-21 18:39:16 +01001474 needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
1475 if (needs_runtime_call) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07001476 InvokeRuntimeCallingConvention calling_convention;
1477 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
1478 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
1479 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
1480 } else {
1481 locations->SetInAt(0, Location::RequiresRegister());
1482 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1483 if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1484 locations->SetInAt(2, Location::RequiresFpuRegister());
1485 } else {
1486 locations->SetInAt(2, Location::RequiresRegister());
1487 }
1488 }
1489}
1490
1491void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
1492 LocationSummary* locations = instruction->GetLocations();
1493 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1494 Location index = locations->InAt(1);
1495 Primitive::Type value_type = instruction->GetComponentType();
1496 bool needs_runtime_call = locations->WillCall();
1497 bool needs_write_barrier =
1498 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1499
1500 switch (value_type) {
1501 case Primitive::kPrimBoolean:
1502 case Primitive::kPrimByte: {
1503 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1504 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1505 if (index.IsConstant()) {
1506 size_t offset =
1507 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1508 __ StoreToOffset(kStoreByte, value, obj, offset);
1509 } else {
1510 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1511 __ StoreToOffset(kStoreByte, value, TMP, data_offset);
1512 }
1513 break;
1514 }
1515
1516 case Primitive::kPrimShort:
1517 case Primitive::kPrimChar: {
1518 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1519 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1520 if (index.IsConstant()) {
1521 size_t offset =
1522 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1523 __ StoreToOffset(kStoreHalfword, value, obj, offset);
1524 } else {
1525 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1526 __ Daddu(TMP, obj, TMP);
1527 __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
1528 }
1529 break;
1530 }
1531
1532 case Primitive::kPrimInt:
1533 case Primitive::kPrimNot: {
1534 if (!needs_runtime_call) {
1535 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1536 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1537 if (index.IsConstant()) {
1538 size_t offset =
1539 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1540 __ StoreToOffset(kStoreWord, value, obj, offset);
1541 } else {
1542 DCHECK(index.IsRegister()) << index;
1543 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1544 __ Daddu(TMP, obj, TMP);
1545 __ StoreToOffset(kStoreWord, value, TMP, data_offset);
1546 }
1547 codegen_->MaybeRecordImplicitNullCheck(instruction);
1548 if (needs_write_barrier) {
1549 DCHECK_EQ(value_type, Primitive::kPrimNot);
1550 codegen_->MarkGCCard(obj, value);
1551 }
1552 } else {
1553 DCHECK_EQ(value_type, Primitive::kPrimNot);
1554 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
1555 instruction,
1556 instruction->GetDexPc(),
1557 nullptr);
1558 }
1559 break;
1560 }
1561
1562 case Primitive::kPrimLong: {
1563 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1564 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1565 if (index.IsConstant()) {
1566 size_t offset =
1567 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1568 __ StoreToOffset(kStoreDoubleword, value, obj, offset);
1569 } else {
1570 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1571 __ Daddu(TMP, obj, TMP);
1572 __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
1573 }
1574 break;
1575 }
1576
1577 case Primitive::kPrimFloat: {
1578 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1579 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1580 DCHECK(locations->InAt(2).IsFpuRegister());
1581 if (index.IsConstant()) {
1582 size_t offset =
1583 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1584 __ StoreFpuToOffset(kStoreWord, value, obj, offset);
1585 } else {
1586 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1587 __ Daddu(TMP, obj, TMP);
1588 __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
1589 }
1590 break;
1591 }
1592
1593 case Primitive::kPrimDouble: {
1594 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1595 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1596 DCHECK(locations->InAt(2).IsFpuRegister());
1597 if (index.IsConstant()) {
1598 size_t offset =
1599 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1600 __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
1601 } else {
1602 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1603 __ Daddu(TMP, obj, TMP);
1604 __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
1605 }
1606 break;
1607 }
1608
1609 case Primitive::kPrimVoid:
1610 LOG(FATAL) << "Unreachable type " << instruction->GetType();
1611 UNREACHABLE();
1612 }
1613
1614  // For ints and objects, the implicit null check was already recorded inside the switch above.
1615 if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
1616 codegen_->MaybeRecordImplicitNullCheck(instruction);
1617 }
1618}
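// For reference stores, the fast path above marks the GC card after the write;
// only stores that need a dynamic type check (NeedsTypeCheck()) go through the
// pAputObject runtime entry point, which presumably performs the type check
// and the write barrier itself.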
1619
1620void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
David Brazdil77a48ae2015-09-15 12:34:04 +00001621 LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
1622 ? LocationSummary::kCallOnSlowPath
1623 : LocationSummary::kNoCall;
1624 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
Alexey Frunze4dda3372015-06-01 18:31:49 -07001625 locations->SetInAt(0, Location::RequiresRegister());
1626 locations->SetInAt(1, Location::RequiresRegister());
1627 if (instruction->HasUses()) {
1628 locations->SetOut(Location::SameAsFirstInput());
1629 }
1630}
1631
1632void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1633 LocationSummary* locations = instruction->GetLocations();
Serban Constantinescu5a6cc492015-08-13 15:20:25 +01001634 BoundsCheckSlowPathMIPS64* slow_path =
1635 new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
Alexey Frunze4dda3372015-06-01 18:31:49 -07001636 codegen_->AddSlowPath(slow_path);
1637
1638 GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
1639 GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
1640
1641 // length is limited by the maximum positive signed 32-bit integer.
1642 // Unsigned comparison of length and index checks for index < 0
1643 // and for length <= index simultaneously.
1644 // Mips R6 requires lhs != rhs for compact branches.
1645 if (index == length) {
1646 __ B(slow_path->GetEntryLabel());
1647 } else {
1648 __ Bgeuc(index, length, slow_path->GetEntryLabel());
1649 }
1650}
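// Illustration of the unsigned comparison above: with length == 10 and
// index == -1, the index compares as a very large unsigned value, so
// Bgeuc(index, length) branches to the slow path; the same single compare
// also catches index >= length for non-negative indices.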
1651
1652void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
1653 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1654 instruction,
1655 LocationSummary::kCallOnSlowPath);
1656 locations->SetInAt(0, Location::RequiresRegister());
1657 locations->SetInAt(1, Location::RequiresRegister());
Serban Constantinescu5a6cc492015-08-13 15:20:25 +01001658 // Note that TypeCheckSlowPathMIPS64 uses this register too.
Alexey Frunze4dda3372015-06-01 18:31:49 -07001659 locations->AddTemp(Location::RequiresRegister());
1660}
1661
1662void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
1663 LocationSummary* locations = instruction->GetLocations();
1664 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1665 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
1666 GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
1667
Serban Constantinescu5a6cc492015-08-13 15:20:25 +01001668 SlowPathCodeMIPS64* slow_path =
1669 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
Alexey Frunze4dda3372015-06-01 18:31:49 -07001670 codegen_->AddSlowPath(slow_path);
1671
1672 // TODO: avoid this check if we know obj is not null.
1673 __ Beqzc(obj, slow_path->GetExitLabel());
1674 // Compare the class of `obj` with `cls`.
1675 __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
1676 __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
1677 __ Bind(slow_path->GetExitLabel());
1678}
1679
1680void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
1681 LocationSummary* locations =
1682 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1683 locations->SetInAt(0, Location::RequiresRegister());
1684 if (check->HasUses()) {
1685 locations->SetOut(Location::SameAsFirstInput());
1686 }
1687}
1688
1689void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
1690 // We assume the class is not null.
1691 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
1692 check->GetLoadClass(),
1693 check,
1694 check->GetDexPc(),
1695 true);
1696 codegen_->AddSlowPath(slow_path);
1697 GenerateClassInitializationCheck(slow_path,
1698 check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
1699}
1700
1701void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
1702 Primitive::Type in_type = compare->InputAt(0)->GetType();
1703
1704 LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
1705 ? LocationSummary::kCall
1706 : LocationSummary::kNoCall;
1707
1708 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
1709
1710 switch (in_type) {
1711 case Primitive::kPrimLong:
1712 locations->SetInAt(0, Location::RequiresRegister());
1713 locations->SetInAt(1, Location::RequiresRegister());
1714 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1715 break;
1716
1717 case Primitive::kPrimFloat:
1718 case Primitive::kPrimDouble: {
1719 InvokeRuntimeCallingConvention calling_convention;
1720 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1721 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
1722 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
1723 break;
1724 }
1725
1726 default:
1727 LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1728 }
1729}
1730
1731void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
1732 LocationSummary* locations = instruction->GetLocations();
1733 Primitive::Type in_type = instruction->InputAt(0)->GetType();
1734
1735 // 0 if: left == right
1736 // 1 if: left > right
1737 // -1 if: left < right
1738 switch (in_type) {
1739 case Primitive::kPrimLong: {
1740 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1741 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1742 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1743 // TODO: more efficient (direct) comparison with a constant
1744 __ Slt(TMP, lhs, rhs);
1745 __ Slt(dst, rhs, lhs);
1746 __ Subu(dst, dst, TMP);
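      // In effect: dst = (lhs > rhs) - (lhs < rhs), i.e. 1, 0, or -1.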
1747 break;
1748 }
1749
1750 case Primitive::kPrimFloat:
1751 case Primitive::kPrimDouble: {
1752 int32_t entry_point_offset;
1753 if (in_type == Primitive::kPrimFloat) {
1754 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
1755 : QUICK_ENTRY_POINT(pCmplFloat);
1756 } else {
1757 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
1758 : QUICK_ENTRY_POINT(pCmplDouble);
1759 }
1760 codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
1761 break;
1762 }
1763
1764 default:
1765 LOG(FATAL) << "Unimplemented compare type " << in_type;
1766 }
1767}
1768
1769void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
1770 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1771 locations->SetInAt(0, Location::RequiresRegister());
1772 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1773 if (instruction->NeedsMaterialization()) {
1774 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1775 }
1776}
1777
1778void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
1779 if (!instruction->NeedsMaterialization()) {
1780 return;
1781 }
1782
1783 LocationSummary* locations = instruction->GetLocations();
1784
1785 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1786 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1787 Location rhs_location = locations->InAt(1);
1788
1789 GpuRegister rhs_reg = ZERO;
1790 int64_t rhs_imm = 0;
1791 bool use_imm = rhs_location.IsConstant();
1792 if (use_imm) {
1793 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1794 } else {
1795 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1796 }
1797
1798 IfCondition if_cond = instruction->GetCondition();
1799
1800 switch (if_cond) {
1801 case kCondEQ:
1802 case kCondNE:
1803 if (use_imm && IsUint<16>(rhs_imm)) {
1804 __ Xori(dst, lhs, rhs_imm);
1805 } else {
1806 if (use_imm) {
1807 rhs_reg = TMP;
1808 __ LoadConst32(rhs_reg, rhs_imm);
1809 }
1810 __ Xor(dst, lhs, rhs_reg);
1811 }
1812 if (if_cond == kCondEQ) {
1813 __ Sltiu(dst, dst, 1);
1814 } else {
1815 __ Sltu(dst, ZERO, dst);
1816 }
1817 break;
1818
1819 case kCondLT:
1820 case kCondGE:
1821 if (use_imm && IsInt<16>(rhs_imm)) {
1822 __ Slti(dst, lhs, rhs_imm);
1823 } else {
1824 if (use_imm) {
1825 rhs_reg = TMP;
1826 __ LoadConst32(rhs_reg, rhs_imm);
1827 }
1828 __ Slt(dst, lhs, rhs_reg);
1829 }
1830 if (if_cond == kCondGE) {
1831 // Simulate lhs >= rhs via !(lhs < rhs) since there's
1832 // only the slt instruction but no sge.
1833 __ Xori(dst, dst, 1);
1834 }
1835 break;
1836
1837 case kCondLE:
1838 case kCondGT:
1839 if (use_imm && IsInt<16>(rhs_imm + 1)) {
1840 // Simulate lhs <= rhs via lhs < rhs + 1.
1841 __ Slti(dst, lhs, rhs_imm + 1);
1842 if (if_cond == kCondGT) {
1843 // Simulate lhs > rhs via !(lhs <= rhs) since there's
1844 // only the slti instruction but no sgti.
1845 __ Xori(dst, dst, 1);
1846 }
1847 } else {
1848 if (use_imm) {
1849 rhs_reg = TMP;
1850 __ LoadConst32(rhs_reg, rhs_imm);
1851 }
1852 __ Slt(dst, rhs_reg, lhs);
1853 if (if_cond == kCondLE) {
1854 // Simulate lhs <= rhs via !(rhs < lhs) since there's
1855 // only the slt instruction but no sle.
1856 __ Xori(dst, dst, 1);
1857 }
1858 }
1859 break;
1860 }
1861}
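// Rough summary of the materialization above (32-bit operands):
//   EQ/NE: dst = lhs ^ rhs, then sltiu dst, dst, 1 (EQ) or sltu dst, zero, dst (NE)
//   LT/GE: dst = slt(lhs, rhs), inverted with xori dst, dst, 1 for GE
//   LE/GT: either slti(lhs, rhs + 1) or slt(rhs, lhs), inverted as needed,
//          since MIPS has no sle/sge/sgt instructions.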
1862
1863void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
1864 LocationSummary* locations =
1865 new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1866 switch (div->GetResultType()) {
1867 case Primitive::kPrimInt:
1868 case Primitive::kPrimLong:
1869 locations->SetInAt(0, Location::RequiresRegister());
1870 locations->SetInAt(1, Location::RequiresRegister());
1871 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1872 break;
1873
1874 case Primitive::kPrimFloat:
1875 case Primitive::kPrimDouble:
1876 locations->SetInAt(0, Location::RequiresFpuRegister());
1877 locations->SetInAt(1, Location::RequiresFpuRegister());
1878 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1879 break;
1880
1881 default:
1882 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1883 }
1884}
1885
1886void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
1887 Primitive::Type type = instruction->GetType();
1888 LocationSummary* locations = instruction->GetLocations();
1889
1890 switch (type) {
1891 case Primitive::kPrimInt:
1892 case Primitive::kPrimLong: {
1893 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1894 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1895 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1896 if (type == Primitive::kPrimInt)
1897 __ DivR6(dst, lhs, rhs);
1898 else
1899 __ Ddiv(dst, lhs, rhs);
1900 break;
1901 }
1902 case Primitive::kPrimFloat:
1903 case Primitive::kPrimDouble: {
1904 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1905 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1906 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1907 if (type == Primitive::kPrimFloat)
1908 __ DivS(dst, lhs, rhs);
1909 else
1910 __ DivD(dst, lhs, rhs);
1911 break;
1912 }
1913 default:
1914 LOG(FATAL) << "Unexpected div type " << type;
1915 }
1916}
1917
1918void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
David Brazdil77a48ae2015-09-15 12:34:04 +00001919 LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
1920 ? LocationSummary::kCallOnSlowPath
1921 : LocationSummary::kNoCall;
1922 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
Alexey Frunze4dda3372015-06-01 18:31:49 -07001923 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1924 if (instruction->HasUses()) {
1925 locations->SetOut(Location::SameAsFirstInput());
1926 }
1927}
1928
1929void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1930 SlowPathCodeMIPS64* slow_path =
1931 new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
1932 codegen_->AddSlowPath(slow_path);
1933 Location value = instruction->GetLocations()->InAt(0);
1934
1935 Primitive::Type type = instruction->GetType();
1936
Serguei Katkov8c0676c2015-08-03 13:55:33 +06001937 if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07001938 LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
Serguei Katkov8c0676c2015-08-03 13:55:33 +06001939 return;
Alexey Frunze4dda3372015-06-01 18:31:49 -07001940 }
1941
1942 if (value.IsConstant()) {
1943 int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
1944 if (divisor == 0) {
1945 __ B(slow_path->GetEntryLabel());
1946 } else {
1947      // A division by a non-zero constant is valid. We don't need to perform
1948 // any check, so simply fall through.
1949 }
1950 } else {
1951 __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
1952 }
1953}
1954
1955void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
1956 LocationSummary* locations =
1957 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1958 locations->SetOut(Location::ConstantLocation(constant));
1959}
1960
1961void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
1962 // Will be generated at use site.
1963}
1964
1965void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
1966 exit->SetLocations(nullptr);
1967}
1968
1969void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
1970}
1971
1972void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
1973 LocationSummary* locations =
1974 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1975 locations->SetOut(Location::ConstantLocation(constant));
1976}
1977
1978void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
1979 // Will be generated at use site.
1980}
1981
David Brazdilfc6a86a2015-06-26 10:33:45 +00001982void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07001983 DCHECK(!successor->IsExitBlock());
1984 HBasicBlock* block = got->GetBlock();
1985 HInstruction* previous = got->GetPrevious();
1986 HLoopInformation* info = block->GetLoopInformation();
1987
1988 if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
1989 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1990 GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1991 return;
1992 }
1993 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1994 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1995 }
1996 if (!codegen_->GoesToNextBlock(block, successor)) {
1997 __ B(codegen_->GetLabelOf(successor));
1998 }
1999}
2000
David Brazdilfc6a86a2015-06-26 10:33:45 +00002001void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
2002 got->SetLocations(nullptr);
2003}
2004
2005void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
2006 HandleGoto(got, got->GetSuccessor());
2007}
2008
2009void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
2010 try_boundary->SetLocations(nullptr);
2011}
2012
2013void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
2014 HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
2015 if (!successor->IsExitBlock()) {
2016 HandleGoto(try_boundary, successor);
2017 }
2018}
2019
Alexey Frunze4dda3372015-06-01 18:31:49 -07002020void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
2021 Label* true_target,
2022 Label* false_target,
2023 Label* always_true_target) {
2024 HInstruction* cond = instruction->InputAt(0);
2025 HCondition* condition = cond->AsCondition();
2026
2027 if (cond->IsIntConstant()) {
2028 int32_t cond_value = cond->AsIntConstant()->GetValue();
2029 if (cond_value == 1) {
2030 if (always_true_target != nullptr) {
2031 __ B(always_true_target);
2032 }
2033 return;
2034 } else {
2035 DCHECK_EQ(cond_value, 0);
2036 }
2037 } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
2038 // The condition instruction has been materialized, compare the output to 0.
2039 Location cond_val = instruction->GetLocations()->InAt(0);
2040 DCHECK(cond_val.IsRegister());
2041 __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
2042 } else {
2043 // The condition instruction has not been materialized, use its inputs as
2044 // the comparison and its condition as the branch condition.
2045 GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
2046 Location rhs_location = condition->GetLocations()->InAt(1);
2047 GpuRegister rhs_reg = ZERO;
2048 int32_t rhs_imm = 0;
2049 bool use_imm = rhs_location.IsConstant();
2050 if (use_imm) {
2051 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
2052 } else {
2053 rhs_reg = rhs_location.AsRegister<GpuRegister>();
2054 }
2055
2056 IfCondition if_cond = condition->GetCondition();
2057 if (use_imm && rhs_imm == 0) {
2058 switch (if_cond) {
2059 case kCondEQ:
2060 __ Beqzc(lhs, true_target);
2061 break;
2062 case kCondNE:
2063 __ Bnezc(lhs, true_target);
2064 break;
2065 case kCondLT:
2066 __ Bltzc(lhs, true_target);
2067 break;
2068 case kCondGE:
2069 __ Bgezc(lhs, true_target);
2070 break;
2071 case kCondLE:
2072 __ Blezc(lhs, true_target);
2073 break;
2074 case kCondGT:
2075 __ Bgtzc(lhs, true_target);
2076 break;
2077 }
2078 } else {
2079 if (use_imm) {
2080 rhs_reg = TMP;
2081 __ LoadConst32(rhs_reg, rhs_imm);
2082 }
2083 // It looks like we can get here with lhs == rhs. Should that be possible at all?
2084 // Mips R6 requires lhs != rhs for compact branches.
2085 if (lhs == rhs_reg) {
2086 DCHECK(!use_imm);
2087 switch (if_cond) {
2088 case kCondEQ:
2089 case kCondGE:
2090 case kCondLE:
2091            // If lhs == rhs, a positive condition (EQ, GE, LE) always holds, so branch unconditionally.
2092 __ B(true_target);
2093 break;
2094 case kCondNE:
2095 case kCondLT:
2096 case kCondGT:
2097            // If lhs == rhs, a negative condition (NE, LT, GT) never holds, so emit nothing and fall through.
2098 break;
2099 }
2100 } else {
2101 switch (if_cond) {
2102 case kCondEQ:
2103 __ Beqc(lhs, rhs_reg, true_target);
2104 break;
2105 case kCondNE:
2106 __ Bnec(lhs, rhs_reg, true_target);
2107 break;
2108 case kCondLT:
2109 __ Bltc(lhs, rhs_reg, true_target);
2110 break;
2111 case kCondGE:
2112 __ Bgec(lhs, rhs_reg, true_target);
2113 break;
2114 case kCondLE:
2115 __ Bgec(rhs_reg, lhs, true_target);
2116 break;
2117 case kCondGT:
2118 __ Bltc(rhs_reg, lhs, true_target);
2119 break;
2120 }
2121 }
2122 }
2123 }
2124 if (false_target != nullptr) {
2125 __ B(false_target);
2126 }
2127}
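// Note on the structure above: the generated code branches to true_target when
// the condition holds and otherwise falls through; the final unconditional
// branch to false_target is emitted only when the false successor is not the
// next block in code order (the caller passes nullptr in that case).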
2128
2129void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
2130 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2131 HInstruction* cond = if_instr->InputAt(0);
2132 if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2133 locations->SetInAt(0, Location::RequiresRegister());
2134 }
2135}
2136
2137void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
2138 Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2139 Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2140 Label* always_true_target = true_target;
2141 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2142 if_instr->IfTrueSuccessor())) {
2143 always_true_target = nullptr;
2144 }
2145 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2146 if_instr->IfFalseSuccessor())) {
2147 false_target = nullptr;
2148 }
2149 GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2150}
2151
2152void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2153 LocationSummary* locations = new (GetGraph()->GetArena())
2154 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2155 HInstruction* cond = deoptimize->InputAt(0);
2156 DCHECK(cond->IsCondition());
2157 if (cond->AsCondition()->NeedsMaterialization()) {
2158 locations->SetInAt(0, Location::RequiresRegister());
2159 }
2160}
2161
2162void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2163 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
2164 DeoptimizationSlowPathMIPS64(deoptimize);
2165 codegen_->AddSlowPath(slow_path);
2166 Label* slow_path_entry = slow_path->GetEntryLabel();
2167 GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2168}
2169
2170void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
2171 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2172 LocationSummary* locations =
2173 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2174 locations->SetInAt(0, Location::RequiresRegister());
2175 if (Primitive::IsFloatingPointType(instruction->GetType())) {
2176 locations->SetOut(Location::RequiresFpuRegister());
2177 } else {
2178 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2179 }
2180}
2181
2182void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
2183 const FieldInfo& field_info) {
2184 Primitive::Type type = field_info.GetFieldType();
2185 LocationSummary* locations = instruction->GetLocations();
2186 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2187 LoadOperandType load_type = kLoadUnsignedByte;
2188 switch (type) {
2189 case Primitive::kPrimBoolean:
2190 load_type = kLoadUnsignedByte;
2191 break;
2192 case Primitive::kPrimByte:
2193 load_type = kLoadSignedByte;
2194 break;
2195 case Primitive::kPrimShort:
2196 load_type = kLoadSignedHalfword;
2197 break;
2198 case Primitive::kPrimChar:
2199 load_type = kLoadUnsignedHalfword;
2200 break;
2201 case Primitive::kPrimInt:
2202 case Primitive::kPrimFloat:
2203 load_type = kLoadWord;
2204 break;
2205 case Primitive::kPrimLong:
2206 case Primitive::kPrimDouble:
2207 load_type = kLoadDoubleword;
2208 break;
2209 case Primitive::kPrimNot:
2210 load_type = kLoadUnsignedWord;
2211 break;
2212 case Primitive::kPrimVoid:
2213 LOG(FATAL) << "Unreachable type " << type;
2214 UNREACHABLE();
2215 }
2216 if (!Primitive::IsFloatingPointType(type)) {
2217 DCHECK(locations->Out().IsRegister());
2218 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2219 __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2220 } else {
2221 DCHECK(locations->Out().IsFpuRegister());
2222 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2223 __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2224 }
2225
2226 codegen_->MaybeRecordImplicitNullCheck(instruction);
2227 // TODO: memory barrier?
2228}
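// The load width above mirrors the Java field type; kPrimNot uses
// kLoadUnsignedWord because heap references are 32-bit values even on MIPS64
// (see the DCHECK on sizeof(mirror::HeapReference) in VisitArrayGet), so they
// are zero-extended into a 64-bit register.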
2229
2230void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
2231 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2232 LocationSummary* locations =
2233 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2234 locations->SetInAt(0, Location::RequiresRegister());
2235 if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
2236 locations->SetInAt(1, Location::RequiresFpuRegister());
2237 } else {
2238 locations->SetInAt(1, Location::RequiresRegister());
2239 }
2240}
2241
2242void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
2243 const FieldInfo& field_info) {
2244 Primitive::Type type = field_info.GetFieldType();
2245 LocationSummary* locations = instruction->GetLocations();
2246 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2247 StoreOperandType store_type = kStoreByte;
2248 switch (type) {
2249 case Primitive::kPrimBoolean:
2250 case Primitive::kPrimByte:
2251 store_type = kStoreByte;
2252 break;
2253 case Primitive::kPrimShort:
2254 case Primitive::kPrimChar:
2255 store_type = kStoreHalfword;
2256 break;
2257 case Primitive::kPrimInt:
2258 case Primitive::kPrimFloat:
2259 case Primitive::kPrimNot:
2260 store_type = kStoreWord;
2261 break;
2262 case Primitive::kPrimLong:
2263 case Primitive::kPrimDouble:
2264 store_type = kStoreDoubleword;
2265 break;
2266 case Primitive::kPrimVoid:
2267 LOG(FATAL) << "Unreachable type " << type;
2268 UNREACHABLE();
2269 }
2270 if (!Primitive::IsFloatingPointType(type)) {
2271 DCHECK(locations->InAt(1).IsRegister());
2272 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2273 __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2274 } else {
2275 DCHECK(locations->InAt(1).IsFpuRegister());
2276 FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
2277 __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2278 }
2279
2280 codegen_->MaybeRecordImplicitNullCheck(instruction);
2281 // TODO: memory barriers?
2282 if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
2283 DCHECK(locations->InAt(1).IsRegister());
2284 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2285 codegen_->MarkGCCard(obj, src);
2286 }
2287}
2288
2289void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2290 HandleFieldGet(instruction, instruction->GetFieldInfo());
2291}
2292
2293void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2294 HandleFieldGet(instruction, instruction->GetFieldInfo());
2295}
2296
2297void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2298 HandleFieldSet(instruction, instruction->GetFieldInfo());
2299}
2300
2301void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2302 HandleFieldSet(instruction, instruction->GetFieldInfo());
2303}
2304
2305void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2306 LocationSummary::CallKind call_kind =
Nicolas Geoffray85c7bab2015-09-18 13:40:46 +00002307 instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
Alexey Frunze4dda3372015-06-01 18:31:49 -07002308 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2309 locations->SetInAt(0, Location::RequiresRegister());
2310 locations->SetInAt(1, Location::RequiresRegister());
2311  // The output overlaps the inputs.
Serban Constantinescu5a6cc492015-08-13 15:20:25 +01002312 // Note that TypeCheckSlowPathMIPS64 uses this register too.
Alexey Frunze4dda3372015-06-01 18:31:49 -07002313 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2314}
2315
2316void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2317 LocationSummary* locations = instruction->GetLocations();
2318 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2319 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
2320 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2321
2322 Label done;
2323
2324 // Return 0 if `obj` is null.
2325 // TODO: Avoid this check if we know `obj` is not null.
2326 __ Move(out, ZERO);
2327 __ Beqzc(obj, &done);
2328
2329 // Compare the class of `obj` with `cls`.
2330 __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
Nicolas Geoffray85c7bab2015-09-18 13:40:46 +00002331 if (instruction->IsExactCheck()) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07002332 // Classes must be equal for the instanceof to succeed.
2333 __ Xor(out, out, cls);
2334 __ Sltiu(out, out, 1);
2335 } else {
2336 // If the classes are not equal, we go into a slow path.
2337 DCHECK(locations->OnlyCallsOnSlowPath());
2338 SlowPathCodeMIPS64* slow_path =
Serban Constantinescu5a6cc492015-08-13 15:20:25 +01002339 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
Alexey Frunze4dda3372015-06-01 18:31:49 -07002340 codegen_->AddSlowPath(slow_path);
2341 __ Bnec(out, cls, slow_path->GetEntryLabel());
2342 __ LoadConst32(out, 1);
2343 __ Bind(slow_path->GetExitLabel());
2344 }
2345
2346 __ Bind(&done);
2347}
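// In the exact-check fast path above, the result is computed without a branch
// on the comparison: out = ((obj->klass ^ cls) == 0), via Xor followed by
// Sltiu(out, out, 1). The non-exact case falls back to TypeCheckSlowPathMIPS64
// when the classes differ.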
2348
2349void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
2350 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2351 locations->SetOut(Location::ConstantLocation(constant));
2352}
2353
2354void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
2355 // Will be generated at use site.
2356}
2357
2358void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
2359 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2360 locations->SetOut(Location::ConstantLocation(constant));
2361}
2362
2363void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
2364 // Will be generated at use site.
2365}
2366
Calin Juravle175dc732015-08-25 15:42:32 +01002367void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2368 // The trampoline uses the same calling convention as dex calling conventions,
2369  // except instead of loading arg0/A0 with the target Method*, arg0/A0 will contain
2370 // the method_idx.
2371 HandleInvoke(invoke);
2372}
2373
2374void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2375 codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
2376}
2377
Alexey Frunze4dda3372015-06-01 18:31:49 -07002378void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
2379 InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
2380 CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2381}
2382
2383void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2384 HandleInvoke(invoke);
2385 // The register T0 is required to be used for the hidden argument in
2386 // art_quick_imt_conflict_trampoline, so add the hidden argument.
2387 invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
2388}
2389
2390void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2391 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2392 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2393 uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2394 invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
2395 Location receiver = invoke->GetLocations()->InAt(0);
2396 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2397 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2398
2399 // Set the hidden argument.
2400 __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
2401 invoke->GetDexMethodIndex());
2402
2403 // temp = object->GetClass();
2404 if (receiver.IsStackSlot()) {
2405 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2406 __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
2407 } else {
2408 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2409 }
2410 codegen_->MaybeRecordImplicitNullCheck(invoke);
2411 // temp = temp->GetImtEntryAt(method_offset);
2412 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2413 // T9 = temp->GetEntryPoint();
2414 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2415 // T9();
2416 __ Jalr(T9);
2417 DCHECK(!codegen_->IsLeafMethod());
2418 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2419}
2420
2421void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Chris Larsen3039e382015-08-26 07:54:08 -07002422 IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
2423 if (intrinsic.TryDispatch(invoke)) {
2424 return;
2425 }
2426
Alexey Frunze4dda3372015-06-01 18:31:49 -07002427 HandleInvoke(invoke);
2428}
2429
2430void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2431 // When we do not run baseline, explicit clinit checks triggered by static
2432 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2433 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2434
Chris Larsen3039e382015-08-26 07:54:08 -07002435 IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
2436 if (intrinsic.TryDispatch(invoke)) {
2437 return;
2438 }
2439
Alexey Frunze4dda3372015-06-01 18:31:49 -07002440 HandleInvoke(invoke);
2441
2442  // While SetupBlockedRegisters() already blocks registers S2-S8 because they are
2443  // clobbered elsewhere, further reduce register pressure by not allocating a
2444  // register for the current method pointer, as is done for the x86 baseline.
2445 // TODO: remove this once all the issues with register saving/restoring are
2446 // sorted out.
2447 LocationSummary* locations = invoke->GetLocations();
2448 Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
2449 if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
2450 locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
2451 }
2452}
2453
Chris Larsen3039e382015-08-26 07:54:08 -07002454static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07002455 if (invoke->GetLocations()->Intrinsified()) {
Chris Larsen3039e382015-08-26 07:54:08 -07002456 IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
2457 intrinsic.Dispatch(invoke);
Alexey Frunze4dda3372015-06-01 18:31:49 -07002458 return true;
2459 }
2460 return false;
2461}
2462
2463void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2464 // All registers are assumed to be correctly set up per the calling convention.
2465
Vladimir Marko58155012015-08-19 12:49:41 +00002466 Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
2467 switch (invoke->GetMethodLoadKind()) {
2468 case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
2469 // temp = thread->string_init_entrypoint
2470 __ LoadFromOffset(kLoadDoubleword,
2471 temp.AsRegister<GpuRegister>(),
2472 TR,
2473 invoke->GetStringInitOffset());
2474 break;
2475 case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
2476 callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2477 break;
2478 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
2479 __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
2480 break;
2481 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
2482 // TODO: Implement this type. (Needs literal support.) At the moment, the
2483 // CompilerDriver will not direct the backend to use this type for MIPS.
2484 LOG(FATAL) << "Unsupported!";
2485 UNREACHABLE();
2486 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
2487 // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
2488 FALLTHROUGH_INTENDED;
2489 case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
2490 Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2491 GpuRegister reg = temp.AsRegister<GpuRegister>();
2492 GpuRegister method_reg;
2493 if (current_method.IsRegister()) {
2494 method_reg = current_method.AsRegister<GpuRegister>();
2495 } else {
2496 // TODO: use the appropriate DCHECK() here if possible.
2497 // DCHECK(invoke->GetLocations()->Intrinsified());
2498 DCHECK(!current_method.IsValid());
2499 method_reg = reg;
2500 __ Ld(reg, SP, kCurrentMethodStackOffset);
2501 }
Alexey Frunze4dda3372015-06-01 18:31:49 -07002502
Vladimir Marko58155012015-08-19 12:49:41 +00002503 // temp = temp->dex_cache_resolved_methods_;
Vladimir Marko05792b92015-08-03 11:56:49 +01002504 __ LoadFromOffset(kLoadDoubleword,
Vladimir Marko58155012015-08-19 12:49:41 +00002505 reg,
2506 method_reg,
Vladimir Marko05792b92015-08-03 11:56:49 +01002507 ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
Vladimir Marko58155012015-08-19 12:49:41 +00002508 // temp = temp[index_in_cache]
2509 uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
2510 __ LoadFromOffset(kLoadDoubleword,
2511 reg,
2512 reg,
2513 CodeGenerator::GetCachePointerOffset(index_in_cache));
2514 break;
Alexey Frunze4dda3372015-06-01 18:31:49 -07002515 }
Alexey Frunze4dda3372015-06-01 18:31:49 -07002516 }
2517
Vladimir Marko58155012015-08-19 12:49:41 +00002518 switch (invoke->GetCodePtrLocation()) {
2519 case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
2520 __ Jalr(&frame_entry_label_, T9);
2521 break;
2522 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
2523      // T9 = invoke->GetDirectCodePtr();
2524 __ LoadConst64(T9, invoke->GetDirectCodePtr());
2525      // T9()
2526 __ Jalr(T9);
2527 break;
2528 case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
2529 // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
2530 FALLTHROUGH_INTENDED;
2531 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
2532 // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
2533 FALLTHROUGH_INTENDED;
2534 case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
2535 // T9 = callee_method->entry_point_from_quick_compiled_code_;
2536 __ LoadFromOffset(kLoadDoubleword,
2537 T9,
2538 callee_method.AsRegister<GpuRegister>(),
2539 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2540 kMips64WordSize).Int32Value());
2541 // T9()
2542 __ Jalr(T9);
2543 break;
2544 }
Alexey Frunze4dda3372015-06-01 18:31:49 -07002545 DCHECK(!IsLeafMethod());
2546}
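// Sketch of the most common sequence generated above (kDexCacheViaMethod with
// kCallArtMethod; the other load/code-pointer kinds vary):
//   temp = current_method->dex_cache_resolved_methods_
//   temp = temp[index_in_cache]                      // resolved ArtMethod*
//   T9   = temp->entry_point_from_quick_compiled_code_
//   jalr T9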
2547
2548void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2549 // When we do not run baseline, explicit clinit checks triggered by static
2550 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2551 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2552
2553 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2554 return;
2555 }
2556
2557 LocationSummary* locations = invoke->GetLocations();
2558 codegen_->GenerateStaticOrDirectCall(invoke,
2559 locations->HasTemps()
2560 ? locations->GetTemp(0)
2561 : Location::NoLocation());
2562 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2563}
2564
2565void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Chris Larsen3039e382015-08-26 07:54:08 -07002566 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2567 return;
2568 }
2569
Alexey Frunze4dda3372015-06-01 18:31:49 -07002570 LocationSummary* locations = invoke->GetLocations();
2571 Location receiver = locations->InAt(0);
2572 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2573 size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
2574 invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
2575 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2576 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2577
2578 // temp = object->GetClass();
2579 DCHECK(receiver.IsRegister());
2580 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2581 codegen_->MaybeRecordImplicitNullCheck(invoke);
2582 // temp = temp->GetMethodAt(method_offset);
2583 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2584 // T9 = temp->GetEntryPoint();
2585 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2586 // T9();
2587 __ Jalr(T9);
2588 DCHECK(!codegen_->IsLeafMethod());
2589 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2590}
2591
2592void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
Calin Juravle98893e12015-10-02 21:05:03 +01002593 InvokeRuntimeCallingConvention calling_convention;
2594 CodeGenerator::CreateLoadClassLocationSummary(
2595 cls,
2596 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
2597 Location::RegisterLocation(A0));
Alexey Frunze4dda3372015-06-01 18:31:49 -07002598}
2599
2600void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
2601 LocationSummary* locations = cls->GetLocations();
Calin Juravle98893e12015-10-02 21:05:03 +01002602 if (cls->NeedsAccessCheck()) {
2603 codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
2604 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
2605 cls,
2606 cls->GetDexPc(),
2607 nullptr);
Calin Juravle580b6092015-10-06 17:35:58 +01002608 return;
2609 }
2610
2611 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2612 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
2613 if (cls->IsReferrersClass()) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07002614 DCHECK(!cls->CanCallRuntime());
2615 DCHECK(!cls->MustGenerateClinitCheck());
2616 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2617 ArtMethod::DeclaringClassOffset().Int32Value());
2618 } else {
2619 DCHECK(cls->CanCallRuntime());
Vladimir Marko05792b92015-08-03 11:56:49 +01002620 __ LoadFromOffset(kLoadDoubleword, out, current_method,
2621 ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
Alexey Frunze4dda3372015-06-01 18:31:49 -07002622 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
Vladimir Marko05792b92015-08-03 11:56:49 +01002623 // TODO: We will need a read barrier here.
Alexey Frunze4dda3372015-06-01 18:31:49 -07002624 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
2625 cls,
2626 cls,
2627 cls->GetDexPc(),
2628 cls->MustGenerateClinitCheck());
2629 codegen_->AddSlowPath(slow_path);
2630 __ Beqzc(out, slow_path->GetEntryLabel());
2631 if (cls->MustGenerateClinitCheck()) {
2632 GenerateClassInitializationCheck(slow_path, out);
2633 } else {
2634 __ Bind(slow_path->GetExitLabel());
2635 }
2636 }
2637}
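// Resolution chain used above when the class is not the referrer's class:
//   out = current_method->dex_cache_resolved_types_   (64-bit pointer load)
//   out = out[type_index]                             (32-bit reference load)
// A null result means the type is unresolved, so LoadClassSlowPathMIPS64 is
// taken; the clinit check is generated afterwards when required.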
2638
David Brazdilcb1c0552015-08-04 16:22:25 +01002639static int32_t GetExceptionTlsOffset() {
2640 return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
2641}
2642
Alexey Frunze4dda3372015-06-01 18:31:49 -07002643void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
2644 LocationSummary* locations =
2645 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2646 locations->SetOut(Location::RequiresRegister());
2647}
2648
2649void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
2650 GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
David Brazdilcb1c0552015-08-04 16:22:25 +01002651 __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
2652}
2653
2654void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
2655 new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
2656}
2657
2658void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
2659 __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
Alexey Frunze4dda3372015-06-01 18:31:49 -07002660}
2661
2662void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
2663 load->SetLocations(nullptr);
2664}
2665
2666void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
2667 // Nothing to do, this is driven by the code generator.
2668}
2669
2670void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
2671 LocationSummary* locations =
2672 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2673 locations->SetInAt(0, Location::RequiresRegister());
2674 locations->SetOut(Location::RequiresRegister());
2675}
2676
2677void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
2678 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
2679 codegen_->AddSlowPath(slow_path);
2680
2681 LocationSummary* locations = load->GetLocations();
2682 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2683 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
2684 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2685 ArtMethod::DeclaringClassOffset().Int32Value());
Vladimir Marko05792b92015-08-03 11:56:49 +01002686 __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
Alexey Frunze4dda3372015-06-01 18:31:49 -07002687 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
Vladimir Marko05792b92015-08-03 11:56:49 +01002688 // TODO: We will need a read barrier here.
Alexey Frunze4dda3372015-06-01 18:31:49 -07002689 __ Beqzc(out, slow_path->GetEntryLabel());
2690 __ Bind(slow_path->GetExitLabel());
2691}
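// Same pattern for strings: declaring class of the current method ->
// dex_cache_strings_ -> entry at the string index; a null entry branches to
// LoadStringSlowPathMIPS64 to resolve the string.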
2692
2693void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
2694 local->SetLocations(nullptr);
2695}
2696
2697void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
2698 DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2699}
2700
2701void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
2702 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2703 locations->SetOut(Location::ConstantLocation(constant));
2704}
2705
2706void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
2707 // Will be generated at use site.
2708}
2709
2710void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2711 LocationSummary* locations =
2712 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2713 InvokeRuntimeCallingConvention calling_convention;
2714 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2715}
2716
2717void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2718 codegen_->InvokeRuntime(instruction->IsEnter()
2719 ? QUICK_ENTRY_POINT(pLockObject)
2720 : QUICK_ENTRY_POINT(pUnlockObject),
2721 instruction,
2722 instruction->GetDexPc(),
2723 nullptr);
2724 CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2725}
2726
2727void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
2728 LocationSummary* locations =
2729 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2730 switch (mul->GetResultType()) {
2731 case Primitive::kPrimInt:
2732 case Primitive::kPrimLong:
2733 locations->SetInAt(0, Location::RequiresRegister());
2734 locations->SetInAt(1, Location::RequiresRegister());
2735 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2736 break;
2737
2738 case Primitive::kPrimFloat:
2739 case Primitive::kPrimDouble:
2740 locations->SetInAt(0, Location::RequiresFpuRegister());
2741 locations->SetInAt(1, Location::RequiresFpuRegister());
2742 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2743 break;
2744
2745 default:
2746 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2747 }
2748}
2749
void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ MulR6(dst, lhs, rhs);
      else
        __ Dmul(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ MulS(dst, lhs, rhs);
      else
        __ MulD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected mul type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

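// Integer negation has no dedicated MIPS instruction; it is emitted as a subtraction
// from the ZERO register. Floating-point negation uses neg.s/neg.d.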
void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ Subu(dst, ZERO, src);
      else
        __ Dsubu(dst, ZERO, src);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ NegS(dst, src);
      else
        __ NegD(dst, src);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected neg type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

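// The array allocation itself happens in the runtime: the type index is materialized
// in a temp register and the entrypoint recorded on the HNewArray node is invoked;
// its signature is checked against kQuickAllocArrayWithAccessCheck below.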
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}

void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

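// Bitwise NOT is emitted as NOR with the ZERO register; since GPRs are 64 bits wide
// on MIPS64, the same instruction serves both int and long operands.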
void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      __ Nor(dst, src, ZERO);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

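// The input of a boolean-not is known to be 0 or 1, so XORing it with the immediate 1
// flips it without needing a comparison.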
void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  __ Xori(locations->Out().AsRegister<GpuRegister>(),
          locations->InAt(0).AsRegister<GpuRegister>(),
          1);
}

void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

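// An implicit null check folds the test into a memory access: a word is loaded from
// the object into ZERO, so a null reference faults, and RecordPcInfo ties the faulting
// PC to this instruction so the runtime can raise the NullPointerException. The
// explicit variant simply branches to a slow path when the register is zero.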
void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  Location obj = instruction->GetLocations()->InAt(0);

  __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
                                                         ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
                                                        ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

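// Integer remainders are computed inline with MOD/DMOD; floating-point remainders fall
// back to the fmodf/fmod runtime entry points, which is why the locations above use
// the runtime calling convention for those types.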
void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ ModR6(dst, lhs, rhs);
      else
        __ Dmod(dst, lhs, rhs);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

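// Field accesses that could not be resolved at compile time are routed through the
// shared CodeGenerator helpers, which set up the locations and emit a runtime call
// using the MIPS64 field-access calling convention.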
void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

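// Conversion strategy: integral-to-integral conversions use SEB/SEH/SLL for narrowing
// and sign extension, int-to-FP uses MTC1 plus CVT, and FP-to-FP uses CVT.S.D/CVT.D.S,
// all inline. Long-to-FP and FP-to-integral conversions call the quick runtime entry
// points, matching the kCall locations set up above.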
void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions.
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
  DCHECK(codegen_->IsBaseline());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
}

void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}

// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
}

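// Each case value is compared against the input in turn. As a rough sketch, a switch
// starting at 10 with three cases expands to approximately:
//   li   TMP, 10 ; beqc value, TMP, case_0
//   li   TMP, 11 ; beqc value, TMP, case_1
//   li   TMP, 12 ; beqc value, TMP, case_2
//   b    default   (omitted when the default block is the fall-through successor)
// A case value of 0 is tested with beqzc instead, avoiding the constant load.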
3487 int32_t lower_bound = switch_instr->GetStartValue();
3488 int32_t num_entries = switch_instr->GetNumEntries();
3489 LocationSummary* locations = switch_instr->GetLocations();
3490 GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
3491 HBasicBlock* default_block = switch_instr->GetDefaultBlock();
3492
3493 // Create a series of compare/jumps.
3494 const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
3495 for (int32_t i = 0; i < num_entries; i++) {
3496 int32_t case_value = lower_bound + i;
3497 Label* succ = codegen_->GetLabelOf(successors.at(i));
3498 if (case_value == 0) {
3499 __ Beqzc(value_reg, succ);
3500 } else {
3501 __ LoadConst32(TMP, case_value);
3502 __ Beqc(value_reg, TMP, succ);
3503 }
3504 }
3505
3506 // And the default for any other value.
3507 if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
3508 __ B(codegen_->GetLabelOf(default_block));
3509 }
3510}
3511
Alexey Frunze4dda3372015-06-01 18:31:49 -07003512} // namespace mips64
3513} // namespace art