/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intrinsics_x86.h"

#include <limits>

#include "arch/x86/instruction_set_features_x86.h"
#include "art_method.h"
#include "code_generator_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86/constants_x86.h"

namespace art {

namespace x86 {

static constexpr int kDoubleNaNHigh = 0x7FF80000;
static constexpr int kDoubleNaNLow = 0x00000000;
static constexpr int kFloatNaN = 0x7FC00000;

IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
    : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
}


X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
  return reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
}

ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
  return codegen_->GetGraph()->GetArena();
}

bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) {
  Dispatch(invoke);
  LocationSummary* res = invoke->GetLocations();
  return res != nullptr && res->Intrinsified();
}

#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->

// TODO: target as memory.
static void MoveFromReturnRegister(Location target,
                                   Primitive::Type type,
                                   CodeGeneratorX86* codegen) {
  if (!target.IsValid()) {
    DCHECK(type == Primitive::kPrimVoid);
    return;
  }

  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      Register target_reg = target.AsRegister<Register>();
      if (target_reg != EAX) {
        __ movl(target_reg, EAX);
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register target_reg_lo = target.AsRegisterPairLow<Register>();
      Register target_reg_hi = target.AsRegisterPairHigh<Register>();
      if (target_reg_lo != EAX) {
        __ movl(target_reg_lo, EAX);
      }
      if (target_reg_hi != EDX) {
        __ movl(target_reg_hi, EDX);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected void type for valid location " << target;
      UNREACHABLE();

    case Primitive::kPrimDouble: {
      XmmRegister target_reg = target.AsFpuRegister<XmmRegister>();
      if (target_reg != XMM0) {
        __ movsd(target_reg, XMM0);
      }
      break;
    }
    case Primitive::kPrimFloat: {
      XmmRegister target_reg = target.AsFpuRegister<XmmRegister>();
      if (target_reg != XMM0) {
        __ movss(target_reg, XMM0);
      }
      break;
    }
  }
}

static void MoveArguments(HInvoke* invoke, CodeGeneratorX86* codegen) {
  InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}

// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
// call. This will copy the arguments into the positions for a regular call.
//
// Note: The actual parameters are required to be in the locations given by the invoke's location
// summary. If an intrinsic modifies those locations before a slowpath call, they must be
// restored!
class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
 public:
  explicit IntrinsicSlowPathX86(HInvoke* invoke)
      : invoke_(invoke) { }

  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
    CodeGeneratorX86* codegen = down_cast<CodeGeneratorX86*>(codegen_in);
    __ Bind(GetEntryLabel());

    SaveLiveRegisters(codegen, invoke_->GetLocations());

    MoveArguments(invoke_, codegen);

    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
                                          Location::RegisterLocation(EAX));
      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
    } else {
      UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
      UNREACHABLE();
    }

    // Copy the result back to the expected output.
    Location out = invoke_->GetLocations()->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
    }

    RestoreLiveRegisters(codegen, invoke_->GetLocations());
    __ jmp(GetExitLabel());
  }

 private:
  // The instruction where this slow path is happening.
  HInvoke* const invoke_;

  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathX86);
};

#undef __
#define __ assembler->

static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
  if (is64bit) {
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresFpuRegister());
  if (is64bit) {
    locations->AddTemp(Location::RequiresFpuRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

static void MoveFPToInt(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  if (is64bit) {
    // Need to use the temporary.
    XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
    __ movsd(temp, input.AsFpuRegister<XmmRegister>());
    __ movd(output.AsRegisterPairLow<Register>(), temp);
    __ psrlq(temp, Immediate(32));
    __ movd(output.AsRegisterPairHigh<Register>(), temp);
  } else {
    __ movd(output.AsRegister<Register>(), input.AsFpuRegister<XmmRegister>());
  }
}
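
// A minimal C++ sketch of what MoveFPToInt/MoveIntToFP implement (illustrative
// only, not the generated code): Double.doubleToRawLongBits is a pure bit copy,
// which the 64-bit path above performs in two 32-bit halves via movd/psrlq.
//
//   int64_t DoubleToRawLongBits(double in) {
//     int64_t out;
//     memcpy(&out, &in, sizeof(out));  // <cstring>; lowered to register moves.
//     return out;
//   }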

static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  if (is64bit) {
    // Need to use the temporary.
    XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
    XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
    __ movd(temp1, input.AsRegisterPairLow<Register>());
    __ movd(temp2, input.AsRegisterPairHigh<Register>());
    __ punpckldq(temp1, temp2);
    __ movsd(output.AsFpuRegister<XmmRegister>(), temp1);
  } else {
    __ movd(output.AsFpuRegister<XmmRegister>(), input.AsRegister<Register>());
  }
}

void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke, true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke, true);
}

void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), true, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke, false);
}

void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), false, GetAssembler());
}

static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

static void CreateLongToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

static void GenReverseBytes(LocationSummary* locations,
                            Primitive::Type size,
                            X86Assembler* assembler) {
  Register out = locations->Out().AsRegister<Register>();

  switch (size) {
    case Primitive::kPrimShort:
      // TODO: Can be done with an xchg of 8b registers. This is straight from Quick.
      __ bswapl(out);
      __ sarl(out, Immediate(16));
      break;
    case Primitive::kPrimInt:
      __ bswapl(out);
      break;
    default:
      LOG(FATAL) << "Unexpected size for reverse-bytes: " << size;
      UNREACHABLE();
  }
}
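
// Worked example for the kPrimShort path above (illustrative only): for the Java
// short 0x1234 the register holds 0x00001234; bswapl produces 0x34120000, and the
// arithmetic right shift by 16 produces 0x00003412, i.e. the byte-swapped value
// with the sign extension that Short.reverseBytes requires for negative results.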

void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitLongReverseBytes(HInvoke* invoke) {
  CreateLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  Location input = locations->InAt(0);
  Register input_lo = input.AsRegisterPairLow<Register>();
  Register input_hi = input.AsRegisterPairHigh<Register>();
  Location output = locations->Out();
  Register output_lo = output.AsRegisterPairLow<Register>();
  Register output_hi = output.AsRegisterPairHigh<Register>();

  X86Assembler* assembler = GetAssembler();
  // Assign the inputs to the outputs, mixing low/high.
  __ movl(output_lo, input_hi);
  __ movl(output_hi, input_lo);
  __ bswapl(output_lo);
  __ bswapl(output_hi);
}
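
// A minimal C++ sketch of the 64-bit reversal above (illustrative only): swap the
// two 32-bit halves, then byte-swap each half.
//
//   uint64_t ReverseBytes64(uint64_t x) {
//     uint32_t lo = static_cast<uint32_t>(x);
//     uint32_t hi = static_cast<uint32_t>(x >> 32);
//     return (static_cast<uint64_t>(__builtin_bswap32(lo)) << 32) | __builtin_bswap32(hi);
//   }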

void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}


// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.

static void CreateFloatToFloat(ArenaAllocator* arena, HInvoke* invoke) {
  // TODO: Enable memory operations when the assembler supports them.
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  // TODO: Allow x86 to work with memory. This requires assembler support, see below.
  // locations->SetInAt(0, Location::Any());  // X86 can work on memory directly.
  locations->SetOut(Location::SameAsFirstInput());
}

static void MathAbsFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
  Location output = locations->Out();

  if (output.IsFpuRegister()) {
    // Create the right constant on an aligned stack.
    if (is64bit) {
      __ subl(ESP, Immediate(8));
      __ pushl(Immediate(0x7FFFFFFF));
      __ pushl(Immediate(0xFFFFFFFF));
      __ andpd(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
    } else {
      __ subl(ESP, Immediate(12));
      __ pushl(Immediate(0x7FFFFFFF));
      __ andps(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
    }
    __ addl(ESP, Immediate(16));
  } else {
    // TODO: update when assembler support is available.
    UNIMPLEMENTED(FATAL) << "Needs assembler support.";
//  Once assembler support is available, in-memory operations look like this:
//  if (is64bit) {
//    DCHECK(output.IsDoubleStackSlot());
//    __ andl(Address(Register(ESP), output.GetHighStackIndex(kX86WordSize)),
//            Immediate(0x7FFFFFFF));
//  } else {
//    DCHECK(output.IsStackSlot());
//    // Can use and with a literal directly.
//    __ andl(Address(Register(ESP), output.GetStackIndex()), Immediate(0x7FFFFFFF));
//  }
  }
}
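
// The constants pushed above form a mask that clears only the IEEE-754 sign bit
// (illustrative note): float abs is bits & 0x7FFFFFFF, and double abs is
// bits & 0x7FFFFFFFFFFFFFFF, which is why the 64-bit case pushes 0x7FFFFFFF (high
// word) followed by 0xFFFFFFFF (low word) before the andpd.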

void IntrinsicLocationsBuilderX86::VisitMathAbsDouble(HInvoke* invoke) {
  CreateFloatToFloat(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
  CreateFloatToFloat(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), false, GetAssembler());
}

static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RegisterLocation(EAX));
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RegisterLocation(EDX));
}

static void GenAbsInteger(LocationSummary* locations, X86Assembler* assembler) {
  Location output = locations->Out();
  Register out = output.AsRegister<Register>();
  DCHECK_EQ(out, EAX);
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  DCHECK_EQ(temp, EDX);

  // Sign extend EAX into EDX.
  __ cdq();

  // XOR EAX with sign.
  __ xorl(EAX, EDX);

  // Subtract out sign to correct.
  __ subl(EAX, EDX);

  // The result is in EAX.
}
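
// A minimal C++ sketch of the branch-free abs above (illustrative only): with
// s = x >> 31 (all ones for negative x, zero otherwise), (x ^ s) - s negates
// negative inputs and leaves non-negative ones unchanged.
//
//   int32_t Abs32(int32_t x) {
//     int32_t s = x >> 31;  // cdq: sign into EDX.
//     return (x ^ s) - s;   // xorl + subl. E.g. x = -5: s = -1, (-5 ^ -1) - (-1) = 5.
//   }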

static void CreateAbsLongLocation(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
  locations->AddTemp(Location::RequiresRegister());
}

static void GenAbsLong(LocationSummary* locations, X86Assembler* assembler) {
  Location input = locations->InAt(0);
  Register input_lo = input.AsRegisterPairLow<Register>();
  Register input_hi = input.AsRegisterPairHigh<Register>();
  Location output = locations->Out();
  Register output_lo = output.AsRegisterPairLow<Register>();
  Register output_hi = output.AsRegisterPairHigh<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();

  // Compute the sign into the temporary.
  __ movl(temp, input_hi);
  __ sarl(temp, Immediate(31));

  // Store the sign into the output.
  __ movl(output_lo, temp);
  __ movl(output_hi, temp);

  // XOR the input to the output.
  __ xorl(output_lo, input_lo);
  __ xorl(output_hi, input_hi);

  // Subtract the sign.
  __ subl(output_lo, temp);
  __ sbbl(output_hi, temp);
}
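
// Same (x ^ s) - s identity as GenAbsInteger, widened to 64 bits (illustrative
// sketch): the sign is broadcast from the high word, the XOR is applied to both
// halves, and subl/sbbl perform the subtraction with borrow propagation.
//
//   int64_t Abs64(int64_t x) {
//     int64_t s = x >> 63;
//     return (x ^ s) - s;
//   }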

void IntrinsicLocationsBuilderX86::VisitMathAbsInt(HInvoke* invoke) {
  CreateAbsIntLocation(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathAbsLong(HInvoke* invoke) {
  CreateAbsLongLocation(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsLong(HInvoke* invoke) {
  GenAbsLong(invoke->GetLocations(), GetAssembler());
}

static void GenMinMaxFP(LocationSummary* locations, bool is_min, bool is_double,
                        X86Assembler* assembler) {
  Location op1_loc = locations->InAt(0);
  Location op2_loc = locations->InAt(1);
  Location out_loc = locations->Out();
  XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();

  // Shortcut for same input locations.
  if (op1_loc.Equals(op2_loc)) {
    DCHECK(out_loc.Equals(op1_loc));
    return;
  }

  //  (out := op1)
  //  out <=? op2
  //  if NaN jmp nan_label
  //  if out is min jmp done
  //  if op2 is min jmp op2_label
  //  handle -0/+0
  //  jmp done
  // nan_label:
  //  out := NaN
  // op2_label:
  //  out := op2
  // done:
  //
  // This removes one jmp, but needs to copy one input (op1) to out.
  //
  // TODO: This is straight from Quick (except literal pool). Make NaN an out-of-line slowpath?

  XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();

  Label nan, done, op2_label;
  if (is_double) {
    __ ucomisd(out, op2);
  } else {
    __ ucomiss(out, op2);
  }

  __ j(Condition::kParityEven, &nan);

  __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
  __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);

  // Handle 0.0/-0.0.
  if (is_min) {
    if (is_double) {
      __ orpd(out, op2);
    } else {
      __ orps(out, op2);
    }
  } else {
    if (is_double) {
      __ andpd(out, op2);
    } else {
      __ andps(out, op2);
    }
  }
  __ jmp(&done);

  // NaN handling.
  __ Bind(&nan);
  if (is_double) {
    __ pushl(Immediate(kDoubleNaNHigh));
    __ pushl(Immediate(kDoubleNaNLow));
    __ movsd(out, Address(ESP, 0));
    __ addl(ESP, Immediate(8));
  } else {
    __ pushl(Immediate(kFloatNaN));
    __ movss(out, Address(ESP, 0));
    __ addl(ESP, Immediate(4));
  }
  __ jmp(&done);

  // out := op2;
  __ Bind(&op2_label);
  if (is_double) {
    __ movsd(out, op2);
  } else {
    __ movss(out, op2);
  }

  // Done.
  __ Bind(&done);
}
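
// Why the or/and trick above handles signed zeros (illustrative note): +0.0 and
// -0.0 compare equal under ucomiss/ucomisd, so the flags alone cannot pick the
// right one. Their bit patterns differ only in the sign bit, so OR-ing the
// operands yields -0.0 (the correct min) and AND-ing them yields +0.0 (the
// correct max).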

static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetInAt(1, Location::RequiresFpuRegister());
  // The following is sub-optimal, but all we can do for now. It would be fine to also accept
  // the second input to be the output (we can simply swap inputs).
  locations->SetOut(Location::SameAsFirstInput());
}

void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), true, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), true, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), false, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), false, false, GetAssembler());
}

static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
                      X86Assembler* assembler) {
  Location op1_loc = locations->InAt(0);
  Location op2_loc = locations->InAt(1);

  // Shortcut for same input locations.
  if (op1_loc.Equals(op2_loc)) {
    // Can return immediately, as op1_loc == out_loc.
    // Note: if we ever support separate registers, e.g., output into memory, we need to check for
    // a copy here.
    DCHECK(locations->Out().Equals(op1_loc));
    return;
  }

  if (is_long) {
    // Need to perform a subtract to get the sign right.
    // op1 is already in the same location as the output.
    Location output = locations->Out();
    Register output_lo = output.AsRegisterPairLow<Register>();
    Register output_hi = output.AsRegisterPairHigh<Register>();

    Register op2_lo = op2_loc.AsRegisterPairLow<Register>();
    Register op2_hi = op2_loc.AsRegisterPairHigh<Register>();

    // Spare register to compute the subtraction to set condition code.
    Register temp = locations->GetTemp(0).AsRegister<Register>();

    // Subtract off op2_low.
    __ movl(temp, output_lo);
    __ subl(temp, op2_lo);

    // Now use the same temp and the borrow to finish the subtraction of op2_hi.
    __ movl(temp, output_hi);
    __ sbbl(temp, op2_hi);

    // Now the condition code is correct.
    Condition cond = is_min ? Condition::kGreaterEqual : Condition::kLess;
    __ cmovl(cond, output_lo, op2_lo);
    __ cmovl(cond, output_hi, op2_hi);
  } else {
    Register out = locations->Out().AsRegister<Register>();
    Register op2 = op2_loc.AsRegister<Register>();

    //  (out := op1)
    //  out <=? op2
    //  if out is min jmp done
    //  out := op2
    // done:

    __ cmpl(out, op2);
    Condition cond = is_min ? Condition::kGreater : Condition::kLess;
    __ cmovl(cond, out, op2);
  }
}
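
// The long path above computes op1 - op2 as a full 64-bit subtraction purely for
// its flags (the temp results are discarded), then conditionally moves both halves
// of op2 into the output. A C++ sketch of the selection (illustrative only):
//
//   int64_t MinMax64(int64_t a, int64_t b, bool is_min) {
//     bool take_b = is_min ? (a >= b) : (a < b);  // kGreaterEqual / kLess above.
//     return take_b ? b : a;
//   }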

static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  // Register to use to perform a long subtract to set cc.
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), true, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) {
  CreateLongLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), true, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), false, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) {
  CreateLongLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), false, true, GetAssembler());
}

static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister());
}

void IntrinsicLocationsBuilderX86::VisitMathSqrt(HInvoke* invoke) {
  CreateFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
  XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();

  GetAssembler()->sqrtsd(out, in);
}

static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke) {
  MoveArguments(invoke, codegen);

  DCHECK(invoke->IsInvokeStaticOrDirect());
  codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(),
                                      Location::RegisterLocation(EAX));
  codegen->RecordPcInfo(invoke, invoke->GetDexPc());

  // Copy the result back to the expected output.
  Location out = invoke->GetLocations()->Out();
  if (out.IsValid()) {
    DCHECK(out.IsRegister());
    MoveFromReturnRegister(out, invoke->GetType(), codegen);
  }
}

static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
                                       HInvoke* invoke,
                                       CodeGeneratorX86* codegen) {
  // Do we have instruction support?
  if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
    CreateFPToFPLocations(arena, invoke);
    return;
  }

  // We have to fall back to a call to the intrinsic.
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
  locations->SetOut(Location::FpuRegisterLocation(XMM0));
  // Needs to be EAX for the invoke.
  locations->AddTemp(Location::RegisterLocation(EAX));
}

static void GenSSE41FPToFPIntrinsic(CodeGeneratorX86* codegen,
                                    HInvoke* invoke,
                                    X86Assembler* assembler,
                                    int round_mode) {
  LocationSummary* locations = invoke->GetLocations();
  if (locations->WillCall()) {
    InvokeOutOfLineIntrinsic(codegen, invoke);
  } else {
    XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
    XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
    __ roundsd(out, in, Immediate(round_mode));
  }
}

void IntrinsicLocationsBuilderX86::VisitMathCeil(HInvoke* invoke) {
  CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) {
  GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 2);  // Round mode 2: toward +inf.
}

void IntrinsicLocationsBuilderX86::VisitMathFloor(HInvoke* invoke) {
  CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) {
  GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 1);  // Round mode 1: toward -inf.
}

void IntrinsicLocationsBuilderX86::VisitMathRint(HInvoke* invoke) {
  CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) {
  GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);  // Round mode 0: to nearest even.
}

// Note that 32-bit x86 doesn't have the capability to inline MathRoundDouble,
// as it needs 64-bit instructions.
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
  // Do we have instruction support?
  if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
    LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                              LocationSummary::kNoCall,
                                                              kIntrinsified);
    locations->SetInAt(0, Location::RequiresFpuRegister());
    locations->SetOut(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
    return;
  }

  // We have to fall back to a call to the intrinsic.
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
  locations->SetOut(Location::RegisterLocation(EAX));
  // Needs to be EAX for the invoke.
  locations->AddTemp(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  if (locations->WillCall()) {
    InvokeOutOfLineIntrinsic(codegen_, invoke);
    return;
  }

  // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
  XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
  Register out = locations->Out().AsRegister<Register>();
  XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
  XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
  Label done, nan;
  X86Assembler* assembler = GetAssembler();

  // Generate 0.5 into inPlusPointFive.
  __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f)));
  __ movd(inPlusPointFive, out);

  // Add in the input.
  __ addss(inPlusPointFive, in);

  // And floor to an integral value (round mode 1: toward -inf).
  __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));

  __ movl(out, Immediate(kPrimIntMax));
  // maxInt = int-to-float(out)
  __ cvtsi2ss(maxInt, out);

  // if inPlusPointFive >= maxInt goto done
  __ comiss(inPlusPointFive, maxInt);
  __ j(kAboveEqual, &done);

  // if input == NaN goto nan
  __ j(kUnordered, &nan);

  // output = float-to-int-truncate(input)
  __ cvttss2si(out, inPlusPointFive);
  __ jmp(&done);
  __ Bind(&nan);

  // output = 0
  __ xorl(out, out);
  __ Bind(&done);
}
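
// Rounding semantics implemented above, per Math.round(float) (worked examples,
// illustrative only): round(2.3f) = 2 and round(2.5f) = 3 via floor(in + 0.5f);
// round(NaN) = 0 via the nan branch; and any value at or above 2^31 - 1 skips the
// conversion with out already holding kPrimIntMax, matching Java's saturation.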

void IntrinsicLocationsBuilderX86::VisitStringCharAt(HInvoke* invoke) {
  // The inputs plus one temp.
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnSlowPath,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();

  // Location of reference to data array.
  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count.
  const int32_t count_offset = mirror::String::CountOffset().Int32Value();

  Register obj = locations->InAt(0).AsRegister<Register>();
  Register idx = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();

  // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
  // the cost.
  // TODO: For simplicity, the index parameter is requested in a register, so different from Quick
  // we will not optimize the code for constants (which would save a register).

  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);

  X86Assembler* assembler = GetAssembler();

  __ cmpl(idx, Address(obj, count_offset));
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  __ j(kAboveEqual, slow_path->GetEntryLabel());

  // out = out[2*idx].
  __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));

  __ Bind(slow_path->GetExitLabel());
}
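
// Note on the bounds check above (illustrative): cmpl followed by kAboveEqual is
// an unsigned comparison, so a negative index, viewed as a large unsigned value,
// also exceeds the count and takes the slow path. One compare covers both the
// "too big" and the "negative" cases.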

void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
  // The inputs plus one temp.
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  Register argument = locations->InAt(1).AsRegister<Register>();
  __ testl(argument, argument);
  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);
  __ j(kEqual, slow_path->GetEntryLabel());

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pStringCompareTo)));
  __ Bind(slow_path->GetExitLabel());
}

static void CreateStringIndexOfLocations(HInvoke* invoke,
                                         ArenaAllocator* allocator,
                                         bool start_at_zero) {
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // The data needs to be in EDI for scasw. So request that the string is there, anyway.
  locations->SetInAt(0, Location::RegisterLocation(EDI));
  // If we look for a constant char, we'll still have to copy it into EAX. So just request the
  // allocator to do that, anyway. We can still do the constant check by checking the parameter
  // of the instruction explicitly.
  // Note: This works as we don't clobber EAX anywhere.
  locations->SetInAt(1, Location::RegisterLocation(EAX));
  if (!start_at_zero) {
    locations->SetInAt(2, Location::RequiresRegister());  // The starting index.
  }
  // As we clobber EDI during execution anyway, also use it as the output.
  locations->SetOut(Location::SameAsFirstInput());

  // repne scasw uses ECX as the counter.
  locations->AddTemp(Location::RegisterLocation(ECX));
  // Need another temporary to be able to compute the result.
  locations->AddTemp(Location::RequiresRegister());
}

static void GenerateStringIndexOf(HInvoke* invoke,
                                  X86Assembler* assembler,
                                  CodeGeneratorX86* codegen,
                                  ArenaAllocator* allocator,
                                  bool start_at_zero) {
  LocationSummary* locations = invoke->GetLocations();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  Register string_obj = locations->InAt(0).AsRegister<Register>();
  Register search_value = locations->InAt(1).AsRegister<Register>();
  Register counter = locations->GetTemp(0).AsRegister<Register>();
  Register string_length = locations->GetTemp(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();

  // Check our assumptions for registers.
  DCHECK_EQ(string_obj, EDI);
  DCHECK_EQ(search_value, EAX);
  DCHECK_EQ(counter, ECX);
  DCHECK_EQ(out, EDI);

  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
  // or directly dispatch if we have a constant.
  SlowPathCodeX86* slow_path = nullptr;
  if (invoke->InputAt(1)->IsIntConstant()) {
    if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
        std::numeric_limits<uint16_t>::max()) {
      // Always needs the slow-path. We could directly dispatch to it, but this case should be
      // rare, so for simplicity just put the full slow-path down and branch unconditionally.
      slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
      codegen->AddSlowPath(slow_path);
      __ jmp(slow_path->GetEntryLabel());
      __ Bind(slow_path->GetExitLabel());
      return;
    }
  } else {
    __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
    slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
    codegen->AddSlowPath(slow_path);
    __ j(kAbove, slow_path->GetEntryLabel());
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int32_t value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int32_t count_offset = mirror::String::CountOffset().Int32Value();

  // Load string length, i.e., the count field of the string.
  __ movl(string_length, Address(string_obj, count_offset));

  // Do a zero-length check.
  // TODO: Support jecxz.
  Label not_found_label;
  __ testl(string_length, string_length);
  __ j(kEqual, &not_found_label);

  if (start_at_zero) {
    // Number of chars to scan is the same as the string length.
    __ movl(counter, string_length);

    // Move to the start of the string.
    __ addl(string_obj, Immediate(value_offset));
  } else {
    Register start_index = locations->InAt(2).AsRegister<Register>();

    // Do a start_index check.
    __ cmpl(start_index, string_length);
    __ j(kGreaterEqual, &not_found_label);

    // Ensure we have a start index >= 0.
    __ xorl(counter, counter);
    __ cmpl(start_index, Immediate(0));
    __ cmovl(kGreater, counter, start_index);

    // Move to the start of the string: string_obj + value_offset + 2 * start_index.
    __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));

    // Now update ecx (the repne scasw work counter). We have string.length - start_index left to
    // compare.
    __ negl(counter);
    __ leal(counter, Address(string_length, counter, ScaleFactor::TIMES_1, 0));
  }

  // Everything is set up for repne scasw:
  //   * Comparison address in EDI.
  //   * Counter in ECX.
  __ repne_scasw();

  // Did we find a match?
  __ j(kNotEqual, &not_found_label);

  // Yes, we matched. Compute the index of the result.
  __ subl(string_length, counter);
  __ leal(out, Address(string_length, -1));

  Label done;
  __ jmp(&done);

  // Failed to match; return -1.
  __ Bind(&not_found_label);
  __ movl(out, Immediate(-1));

  // And join up at the end.
  __ Bind(&done);
  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}
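
// How the repne scasw result becomes an index (worked example, illustrative only):
// searching "abcab" (length 5) for 'c', scasw examines 3 chars and decrements ECX
// to 2 on the match, so string_length - counter = 3 and subtracting 1 yields
// index 2. If ECX reaches zero without a match, ZF stays clear and the kNotEqual
// branch returns -1.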

void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
  CreateStringIndexOfLocations(invoke, arena_, true);
}

void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
}

void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
  CreateStringIndexOfLocations(invoke, arena_, false);
}

void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
}

void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  Register byte_array = locations->InAt(0).AsRegister<Register>();
  __ testl(byte_array, byte_array);
  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);
  __ j(kEqual, slow_path->GetEntryLabel());

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
  __ Bind(slow_path->GetExitLabel());
}

void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  Register string_to_copy = locations->InAt(0).AsRegister<Register>();
  __ testl(string_to_copy, string_to_copy);
  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);
  __ j(kEqual, slow_path->GetEntryLabel());

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
  __ Bind(slow_path->GetExitLabel());
}

static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
  Register address = locations->InAt(0).AsRegisterPairLow<Register>();
  Location out_loc = locations->Out();
  // x86 allows unaligned access. We do not have to check the input or use specific instructions
  // to avoid a SIGBUS.
  switch (size) {
    case Primitive::kPrimByte:
      __ movsxb(out_loc.AsRegister<Register>(), Address(address, 0));
      break;
    case Primitive::kPrimShort:
      __ movsxw(out_loc.AsRegister<Register>(), Address(address, 0));
      break;
    case Primitive::kPrimInt:
      __ movl(out_loc.AsRegister<Register>(), Address(address, 0));
      break;
    case Primitive::kPrimLong:
      __ movl(out_loc.AsRegisterPairLow<Register>(), Address(address, 0));
      __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(address, 4));
      break;
    default:
      LOG(FATAL) << "Type not recognized for peek: " << size;
      UNREACHABLE();
  }
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) {
  CreateLongToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
  CreateLongToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
  CreateLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
  CreateLongToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}

static void CreateLongIntToVoidLocations(ArenaAllocator* arena, Primitive::Type size,
                                         HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  HInstruction* value = invoke->InputAt(1);
  if (size == Primitive::kPrimByte) {
    locations->SetInAt(1, Location::ByteRegisterOrConstant(EDX, value));
  } else {
    locations->SetInAt(1, Location::RegisterOrConstant(value));
  }
}

static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
  Register address = locations->InAt(0).AsRegisterPairLow<Register>();
  Location value_loc = locations->InAt(1);
  // x86 allows unaligned access. We do not have to check the input or use specific instructions
  // to avoid a SIGBUS.
  switch (size) {
    case Primitive::kPrimByte:
      if (value_loc.IsConstant()) {
        __ movb(Address(address, 0),
                Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        __ movb(Address(address, 0), value_loc.AsRegister<ByteRegister>());
      }
      break;
    case Primitive::kPrimShort:
      if (value_loc.IsConstant()) {
        __ movw(Address(address, 0),
                Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        __ movw(Address(address, 0), value_loc.AsRegister<Register>());
      }
      break;
    case Primitive::kPrimInt:
      if (value_loc.IsConstant()) {
        __ movl(Address(address, 0),
                Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        __ movl(Address(address, 0), value_loc.AsRegister<Register>());
      }
      break;
    case Primitive::kPrimLong:
      if (value_loc.IsConstant()) {
        int64_t value = value_loc.GetConstant()->AsLongConstant()->GetValue();
        __ movl(Address(address, 0), Immediate(Low32Bits(value)));
        __ movl(Address(address, 4), Immediate(High32Bits(value)));
      } else {
        __ movl(Address(address, 0), value_loc.AsRegisterPairLow<Register>());
        __ movl(Address(address, 4), value_loc.AsRegisterPairHigh<Register>());
      }
      break;
    default:
      LOG(FATAL) << "Type not recognized for poke: " << size;
      UNREACHABLE();
  }
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimByte, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimInt, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimLong, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimShort, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) {
  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
  GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86WordSize>()));
}

static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
                         bool is_volatile, X86Assembler* assembler) {
  Register base = locations->InAt(1).AsRegister<Register>();
  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
  Location output = locations->Out();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      __ movl(output.AsRegister<Register>(), Address(base, offset, ScaleFactor::TIMES_1, 0));
      break;

    case Primitive::kPrimLong: {
        Register output_lo = output.AsRegisterPairLow<Register>();
        Register output_hi = output.AsRegisterPairHigh<Register>();
        if (is_volatile) {
          // Need to use a XMM to read atomically.
          XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
          __ movsd(temp, Address(base, offset, ScaleFactor::TIMES_1, 0));
          __ movd(output_lo, temp);
          __ psrlq(temp, Immediate(32));
          __ movd(output_hi, temp);
        } else {
          __ movl(output_lo, Address(base, offset, ScaleFactor::TIMES_1, 0));
          __ movl(output_hi, Address(base, offset, ScaleFactor::TIMES_1, 4));
        }
      }
      break;

    default:
      LOG(FATAL) << "Unsupported op size " << type;
      UNREACHABLE();
  }
}
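
// Why the volatile long path goes through an XMM register (illustrative note):
// on x86, an 8-byte movsd load of a naturally aligned address is performed as a
// single memory access, so the two halves cannot be torn by a concurrent writer,
// whereas the two 32-bit movl loads of the non-volatile path could each observe
// a different store.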

static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke,
                                          bool is_long, bool is_volatile) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  if (is_long) {
    if (is_volatile) {
      // Need to use XMM to read volatile.
      locations->AddTemp(Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
    }
  } else {
    locations->SetOut(Location::RequiresRegister());
  }
}

void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, true, false);  // Long output needs kOutputOverlap.
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, true, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, true);
}

1404void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
1405 GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, false, GetAssembler());
1406}
1407void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
1408 GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, true, GetAssembler());
1409}
1410void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
1411 GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, false, GetAssembler());
1412}
1413void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
1414 GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, true, GetAssembler());
1415}
1416void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
1417 GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, false, GetAssembler());
1418}
1419void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
1420 GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, true, GetAssembler());
1421}
1422
1423
1424static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
1425 Primitive::Type type,
1426 HInvoke* invoke,
1427 bool is_volatile) {
1428 LocationSummary* locations = new (arena) LocationSummary(invoke,
1429 LocationSummary::kNoCall,
1430 kIntrinsified);
1431 locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
1432 locations->SetInAt(1, Location::RequiresRegister());
1433 locations->SetInAt(2, Location::RequiresRegister());
1434 locations->SetInAt(3, Location::RequiresRegister());
1435 if (type == Primitive::kPrimNot) {
1436 // Need temp registers for card-marking.
1437 locations->AddTemp(Location::RequiresRegister());
1438 // Ensure the value is in a byte register.
1439 locations->AddTemp(Location::RegisterLocation(ECX));
1440 } else if (type == Primitive::kPrimLong && is_volatile) {
1441 locations->AddTemp(Location::RequiresFpuRegister());
1442 locations->AddTemp(Location::RequiresFpuRegister());
1443 }
1444}
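
// An illustrative note on pinning ECX above: the card-marking helper stores
// the card with a byte-sized movb, and on 32-bit x86 only EAX/EBX/ECX/EDX
// have byte-addressable forms, so a known byte register is requested rather
// than letting the allocator hand back ESI or EDI.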

void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, true);
}

// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
// memory model.
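// (Illustrative aside, relying on the usual x86-TSO guarantees: stores are not
// reordered with earlier stores, so the release ordering that putOrdered needs
// comes for free; only a volatile store needs a StoreLoad barrier, which is
// the mfence emitted below.)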
static void GenUnsafePut(LocationSummary* locations,
                         Primitive::Type type,
                         bool is_volatile,
                         CodeGeneratorX86* codegen) {
  X86Assembler* assembler = reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
  Register base = locations->InAt(1).AsRegister<Register>();
  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
  Location value_loc = locations->InAt(3);

  if (type == Primitive::kPrimLong) {
    Register value_lo = value_loc.AsRegisterPairLow<Register>();
    Register value_hi = value_loc.AsRegisterPairHigh<Register>();
    if (is_volatile) {
      XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
      XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
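      // movd leaves each 32-bit half in the low dword of an XMM register;
      // punpckldq interleaves the low dwords so temp1 holds value_hi:value_lo,
      // letting the movsd below write all 64 bits in one (atomic) store.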
      __ movd(temp1, value_lo);
      __ movd(temp2, value_hi);
      __ punpckldq(temp1, temp2);
      __ movsd(Address(base, offset, ScaleFactor::TIMES_1, 0), temp1);
    } else {
      __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_lo);
      __ movl(Address(base, offset, ScaleFactor::TIMES_1, 4), value_hi);
    }
  } else {
    __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_loc.AsRegister<Register>());
  }

  if (is_volatile) {
    __ mfence();
  }

  if (type == Primitive::kPrimNot) {
    bool value_can_be_null = true;  // TODO: Worth finding out this information?
    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
                        locations->GetTemp(1).AsRegister<Register>(),
                        base,
                        value_loc.AsRegister<Register>(),
                        value_can_be_null);
  }
}
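
// Taken together (a sketch of the net effect, not emitted verbatim), an
// UnsafePutObjectVolatile therefore expands to roughly:
//   movl [base + offset], value   // the store itself
//   mfence                        // StoreLoad barrier for the volatile store
//   <card mark for value>         // GC write barrier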

void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, codegen_);
}

static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type type,
                                       HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  // Offset is a long, but in 32 bit mode, we only need the low word.
  // Can we update the invoke here to remove a TypeConvert to Long?
  locations->SetInAt(2, Location::RequiresRegister());
  // Expected value must be in EAX or EDX:EAX.
  // For long, new value must be in ECX:EBX.
  if (type == Primitive::kPrimLong) {
    locations->SetInAt(3, Location::RegisterPairLocation(EAX, EDX));
    locations->SetInAt(4, Location::RegisterPairLocation(EBX, ECX));
  } else {
    locations->SetInAt(3, Location::RegisterLocation(EAX));
    locations->SetInAt(4, Location::RequiresRegister());
  }

  // Force a byte register for the output.
  locations->SetOut(Location::RegisterLocation(EAX));
  if (type == Primitive::kPrimNot) {
    // Need temp registers for card-marking.
    locations->AddTemp(Location::RequiresRegister());
    // Need a byte register for marking.
    locations->AddTemp(Location::RegisterLocation(ECX));
  }
}
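
// For reference, the fixed registers above mirror the hardware contract of
// lock cmpxchg8b: it compares EDX:EAX with the 64-bit memory operand, stores
// ECX:EBX and sets ZF on a match, and otherwise loads the memory value into
// EDX:EAX and clears ZF. The 32-bit lock cmpxchg likewise takes the expected
// value in EAX.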

void IntrinsicLocationsBuilderX86::VisitUnsafeCASInt(HInvoke* invoke) {
  CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimInt, invoke);
}

void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
  CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimLong, invoke);
}

void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
  CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}

static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
  X86Assembler* assembler =
      reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
  LocationSummary* locations = invoke->GetLocations();

  Register base = locations->InAt(1).AsRegister<Register>();
  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
  Location out = locations->Out();
  DCHECK_EQ(out.AsRegister<Register>(), EAX);

  if (type == Primitive::kPrimLong) {
    DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
    DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
    DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
    DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
    __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
  } else {
    // Integer or object.
    DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
    Register value = locations->InAt(4).AsRegister<Register>();
    if (type == Primitive::kPrimNot) {
      // Mark card for object assuming new value is stored.
      bool value_can_be_null = true;  // TODO: Worth finding out this information?
      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
                          locations->GetTemp(1).AsRegister<Register>(),
                          base,
                          value,
                          value_can_be_null);
    }

    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
  }

  // locked cmpxchg has full barrier semantics, and we don't need scheduling
  // barriers at this time.

  // Convert ZF into the boolean result.
  __ setb(kZero, out.AsRegister<Register>());
  __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
}
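
// The setb/movzxb tail materializes ZF as the Java-level boolean: setb writes
// 1 to the low byte of EAX only if the compare-exchange succeeded, and movzxb
// zero-extends that byte so EAX holds exactly 0 or 1.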

void IntrinsicCodeGeneratorX86::VisitUnsafeCASInt(HInvoke* invoke) {
  GenCAS(Primitive::kPrimInt, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitUnsafeCASLong(HInvoke* invoke) {
  GenCAS(Primitive::kPrimLong, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
  GenCAS(Primitive::kPrimNot, invoke, codegen_);
}

void IntrinsicLocationsBuilderX86::VisitIntegerReverse(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());
}

static void SwapBits(Register reg, Register temp, int32_t shift, int32_t mask,
                     X86Assembler* assembler) {
  Immediate imm_shift(shift);
  Immediate imm_mask(mask);
  __ movl(temp, reg);
  __ shrl(reg, imm_shift);
  __ andl(temp, imm_mask);
  __ andl(reg, imm_mask);
  __ shll(temp, imm_shift);
  __ orl(reg, temp);
}
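
// As scalar C (a reference sketch, not compiled here), each SwapBits round is
// the classic mask-and-shift exchange:
//
//   uint32_t SwapBits(uint32_t x, int shift, uint32_t mask) {
//     return ((x >> shift) & mask) | ((x & mask) << shift);
//   }
//
// shift=1/mask=0x55555555 swaps adjacent bits, shift=2 swaps 2-bit pairs, and
// shift=4 swaps nibbles.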

void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
  X86Assembler* assembler =
      reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
  LocationSummary* locations = invoke->GetLocations();

  Register reg = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();

  /*
   * Use one bswap instruction to reverse byte order first and then use 3 rounds of
   * swapping bits to reverse bits in a number x. Using bswap to save instructions
   * compared to generic luni implementation which has 5 rounds of swapping bits.
   * x = bswap x
   * x = (x & 0x55555555) << 1 | (x >> 1) & 0x55555555;
   * x = (x & 0x33333333) << 2 | (x >> 2) & 0x33333333;
   * x = (x & 0x0F0F0F0F) << 4 | (x >> 4) & 0x0F0F0F0F;
   */
  __ bswapl(reg);
  SwapBits(reg, temp, 1, 0x55555555, assembler);
  SwapBits(reg, temp, 2, 0x33333333, assembler);
  SwapBits(reg, temp, 4, 0x0f0f0f0f, assembler);
}
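
// Worked example: reversing 0x00000001. bswap yields 0x01000000 (bit 24); the
// shift=1 round moves it to bit 25, shift=2 to bit 27, and shift=4 to bit 31,
// giving 0x80000000 == Integer.reverse(1).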

void IntrinsicLocationsBuilderX86::VisitLongReverse(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
  X86Assembler* assembler =
      reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
  LocationSummary* locations = invoke->GetLocations();

  Register reg_low = locations->InAt(0).AsRegisterPairLow<Register>();
  Register reg_high = locations->InAt(0).AsRegisterPairHigh<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();

  // We want to swap high/low, then bswap each one, and then do the same
  // as a 32 bit reverse.
  // Exchange high and low.
  __ movl(temp, reg_low);
  __ movl(reg_low, reg_high);
  __ movl(reg_high, temp);

  // bit-reverse low
  __ bswapl(reg_low);
  SwapBits(reg_low, temp, 1, 0x55555555, assembler);
  SwapBits(reg_low, temp, 2, 0x33333333, assembler);
  SwapBits(reg_low, temp, 4, 0x0f0f0f0f, assembler);

  // bit-reverse high
  __ bswapl(reg_high);
  SwapBits(reg_high, temp, 1, 0x55555555, assembler);
  SwapBits(reg_high, temp, 2, 0x33333333, assembler);
  SwapBits(reg_high, temp, 4, 0x0f0f0f0f, assembler);
}
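
// Why the word exchange comes first: a 64-bit reversal maps bit i to bit
// 63 - i, so every bit of the low word ends up in the high word and vice
// versa. After the exchange, each half reduces to the 32-bit reversal above.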

// Unimplemented intrinsics.

#define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {   \
}                                                                                      \
void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {      \
}

UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)

}  // namespace x86
}  // namespace art