1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "test/unittests/compiler/instruction-selector-unittest.h"
7 #include "src/compiler/node-matchers.h"
13 // -----------------------------------------------------------------------------
17 TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
18 StreamBuilder m(this, kMachFloat32, kMachFloat64);
19 m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
21 ASSERT_EQ(1U, s.size());
22 EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
23 EXPECT_EQ(1U, s[0]->InputCount());
24 EXPECT_EQ(1U, s[0]->OutputCount());
28 TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
29 StreamBuilder m(this, kMachInt64, kMachInt32);
30 m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
32 ASSERT_EQ(1U, s.size());
33 EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
37 TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
38 StreamBuilder m(this, kMachFloat64, kMachUint32);
39 m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
41 ASSERT_EQ(1U, s.size());
42 EXPECT_EQ(kSSEUint32ToFloat64, s[0]->arch_opcode());
46 TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
47 StreamBuilder m(this, kMachUint64, kMachUint32);
48 m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
50 ASSERT_EQ(1U, s.size());
51 EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
55 TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
56 StreamBuilder m(this, kMachFloat64, kMachFloat32);
57 m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
59 ASSERT_EQ(1U, s.size());
60 EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
61 EXPECT_EQ(1U, s[0]->InputCount());
62 EXPECT_EQ(1U, s[0]->OutputCount());
66 TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
67 StreamBuilder m(this, kMachInt32, kMachInt64);
68 m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
70 ASSERT_EQ(1U, s.size());
71 EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
75 // -----------------------------------------------------------------------------
82 ArchOpcode load_opcode;
83 ArchOpcode store_opcode;
87 std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
88 return os << memacc.type;
92 static const MemoryAccess kMemoryAccesses[] = {
93 {kMachInt8, kX64Movsxbl, kX64Movb},
94 {kMachUint8, kX64Movzxbl, kX64Movb},
95 {kMachInt16, kX64Movsxwl, kX64Movw},
96 {kMachUint16, kX64Movzxwl, kX64Movw},
97 {kMachInt32, kX64Movl, kX64Movl},
98 {kMachUint32, kX64Movl, kX64Movl},
99 {kMachInt64, kX64Movq, kX64Movq},
100 {kMachUint64, kX64Movq, kX64Movq},
101 {kMachFloat32, kX64Movss, kX64Movss},
102 {kMachFloat64, kX64Movsd, kX64Movsd}};
107 typedef InstructionSelectorTestWithParam<MemoryAccess>
108 InstructionSelectorMemoryAccessTest;
111 TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
112 const MemoryAccess memacc = GetParam();
113 StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
114 m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
115 Stream s = m.Build();
116 ASSERT_EQ(1U, s.size());
117 EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
118 EXPECT_EQ(2U, s[0]->InputCount());
119 EXPECT_EQ(1U, s[0]->OutputCount());
123 TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
124 const MemoryAccess memacc = GetParam();
125 StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
126 m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
127 m.Return(m.Int32Constant(0));
128 Stream s = m.Build();
129 ASSERT_EQ(1U, s.size());
130 EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
131 EXPECT_EQ(3U, s[0]->InputCount());
132 EXPECT_EQ(0U, s[0]->OutputCount());
136 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
137 InstructionSelectorMemoryAccessTest,
138 ::testing::ValuesIn(kMemoryAccesses));
140 // -----------------------------------------------------------------------------
141 // ChangeUint32ToUint64.
146 typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
149 struct BinaryOperation {
150 Constructor constructor;
151 const char* constructor_name;
155 std::ostream& operator<<(std::ostream& os, const BinaryOperation& bop) {
156 return os << bop.constructor_name;
160 const BinaryOperation kWord32BinaryOperations[] = {
161 {&RawMachineAssembler::Word32And, "Word32And"},
162 {&RawMachineAssembler::Word32Or, "Word32Or"},
163 {&RawMachineAssembler::Word32Xor, "Word32Xor"},
164 {&RawMachineAssembler::Word32Shl, "Word32Shl"},
165 {&RawMachineAssembler::Word32Shr, "Word32Shr"},
166 {&RawMachineAssembler::Word32Sar, "Word32Sar"},
167 {&RawMachineAssembler::Word32Ror, "Word32Ror"},
168 {&RawMachineAssembler::Word32Equal, "Word32Equal"},
169 {&RawMachineAssembler::Int32Add, "Int32Add"},
170 {&RawMachineAssembler::Int32Sub, "Int32Sub"},
171 {&RawMachineAssembler::Int32Mul, "Int32Mul"},
172 {&RawMachineAssembler::Int32MulHigh, "Int32MulHigh"},
173 {&RawMachineAssembler::Int32Div, "Int32Div"},
174 {&RawMachineAssembler::Int32LessThan, "Int32LessThan"},
175 {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual"},
176 {&RawMachineAssembler::Int32Mod, "Int32Mod"},
177 {&RawMachineAssembler::Uint32Div, "Uint32Div"},
178 {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan"},
179 {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual"},
180 {&RawMachineAssembler::Uint32Mod, "Uint32Mod"}};
185 typedef InstructionSelectorTestWithParam<BinaryOperation>
186 InstructionSelectorChangeUint32ToUint64Test;
189 TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
190 const BinaryOperation& bop = GetParam();
191 StreamBuilder m(this, kMachUint64, kMachInt32, kMachInt32);
192 Node* const p0 = m.Parameter(0);
193 Node* const p1 = m.Parameter(1);
194 m.Return(m.ChangeUint32ToUint64((m.*bop.constructor)(p0, p1)));
195 Stream s = m.Build();
196 ASSERT_EQ(1U, s.size());
200 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
201 InstructionSelectorChangeUint32ToUint64Test,
202 ::testing::ValuesIn(kWord32BinaryOperations));
205 // -----------------------------------------------------------------------------
206 // TruncateInt64ToInt32.
209 TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
210 StreamBuilder m(this, kMachInt32, kMachInt64);
211 Node* const p = m.Parameter(0);
212 Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
214 Stream s = m.Build();
215 ASSERT_EQ(1U, s.size());
216 EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
217 ASSERT_EQ(2U, s[0]->InputCount());
218 EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
219 EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
220 ASSERT_EQ(1U, s[0]->OutputCount());
221 EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
222 EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
226 TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
227 StreamBuilder m(this, kMachInt32, kMachInt64);
228 Node* const p = m.Parameter(0);
229 Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
231 Stream s = m.Build();
232 ASSERT_EQ(1U, s.size());
233 EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
234 ASSERT_EQ(2U, s[0]->InputCount());
235 EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
236 EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
237 ASSERT_EQ(1U, s[0]->OutputCount());
238 EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
239 EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
243 // -----------------------------------------------------------------------------
247 TEST_F(InstructionSelectorTest, Int32AddWithInt32ParametersLea) {
248 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
249 Node* const p0 = m.Parameter(0);
250 Node* const p1 = m.Parameter(1);
251 Node* const a0 = m.Int32Add(p0, p1);
252 // Additional uses of input to add chooses lea
253 Node* const a1 = m.Int32Div(p0, p1);
254 m.Return(m.Int32Div(a0, a1));
255 Stream s = m.Build();
256 ASSERT_EQ(3U, s.size());
257 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
258 ASSERT_EQ(2U, s[0]->InputCount());
259 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
260 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
264 TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) {
265 StreamBuilder m(this, kMachInt32, kMachInt32);
266 Node* const p0 = m.Parameter(0);
267 Node* const c0 = m.Int32Constant(15);
268 // If one of the add's operands is only used once, use an "leal", even though
269 // an "addl" could be used. The "leal" has proven faster--out best guess is
270 // that it gives the register allocation more freedom and it doesn't set
271 // flags, reducing pressure in the CPU's pipeline. If we're lucky with
272 // register allocation, then code generation will select an "addl" later for
273 // the cases that have been measured to be faster.
274 Node* const v0 = m.Int32Add(p0, c0);
276 Stream s = m.Build();
277 ASSERT_EQ(1U, s.size());
278 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
279 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
280 ASSERT_EQ(2U, s[0]->InputCount());
281 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
282 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
286 TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) {
287 StreamBuilder m(this, kMachInt32, kMachInt32);
288 Node* const p0 = m.Parameter(0);
289 Node* const c0 = m.Int32Constant(1);
290 // If there is only a single use of an add's input and the immediate constant
291 // for the add is 1, don't use an inc. It is much slower on modern Intel
293 m.Return(m.Int32Add(p0, c0));
294 Stream s = m.Build();
295 ASSERT_EQ(1U, s.size());
296 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
297 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
298 ASSERT_EQ(2U, s[0]->InputCount());
299 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
300 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
304 TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) {
305 StreamBuilder m(this, kMachInt32, kMachInt32);
306 Node* const p0 = m.Parameter(0);
307 Node* const c0 = m.Int32Constant(15);
308 // A second use of an add's input uses lea
309 Node* const a0 = m.Int32Add(p0, c0);
310 m.Return(m.Int32Div(a0, p0));
311 Stream s = m.Build();
312 ASSERT_EQ(2U, s.size());
313 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
314 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
315 ASSERT_EQ(2U, s[0]->InputCount());
316 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
317 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
321 TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) {
322 StreamBuilder m(this, kMachInt32, kMachInt32);
323 Node* const p0 = m.Parameter(0);
324 Node* const c0 = m.Int32Constant(15);
325 // If one of the add's operands is only used once, use an "leal", even though
326 // an "addl" could be used. The "leal" has proven faster--out best guess is
327 // that it gives the register allocation more freedom and it doesn't set
328 // flags, reducing pressure in the CPU's pipeline. If we're lucky with
329 // register allocation, then code generation will select an "addl" later for
330 // the cases that have been measured to be faster.
331 m.Return(m.Int32Add(c0, p0));
332 Stream s = m.Build();
333 ASSERT_EQ(1U, s.size());
334 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
335 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
336 ASSERT_EQ(2U, s[0]->InputCount());
337 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
338 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
342 TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) {
343 StreamBuilder m(this, kMachInt32, kMachInt32);
344 Node* const p0 = m.Parameter(0);
345 Node* const c0 = m.Int32Constant(15);
346 // A second use of an add's input uses lea
347 Node* const a0 = m.Int32Add(c0, p0);
349 m.Return(m.Int32Div(a0, p0));
350 Stream s = m.Build();
351 ASSERT_EQ(2U, s.size());
352 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
353 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
354 ASSERT_EQ(2U, s[0]->InputCount());
355 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
356 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
360 TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) {
361 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
362 Node* const p0 = m.Parameter(0);
363 Node* const p1 = m.Parameter(1);
364 // If one of the add's operands is only used once, use an "leal", even though
365 // an "addl" could be used. The "leal" has proven faster--out best guess is
366 // that it gives the register allocation more freedom and it doesn't set
367 // flags, reducing pressure in the CPU's pipeline. If we're lucky with
368 // register allocation, then code generation will select an "addl" later for
369 // the cases that have been measured to be faster.
370 m.Return(m.Int32Add(p0, p1));
371 Stream s = m.Build();
372 ASSERT_EQ(1U, s.size());
373 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
374 EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
375 ASSERT_EQ(2U, s[0]->InputCount());
376 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
377 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
381 TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) {
382 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
383 Node* const p0 = m.Parameter(0);
384 Node* const p1 = m.Parameter(1);
385 // If all of of the add's operands are used multiple times, use an "leal".
386 Node* const v1 = m.Int32Add(p0, p1);
387 m.Return(m.Int32Add(m.Int32Add(v1, p1), p0));
388 Stream s = m.Build();
389 ASSERT_EQ(3U, s.size());
390 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
391 EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
392 ASSERT_EQ(2U, s[0]->InputCount());
393 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
394 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
398 TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
399 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
400 Node* const p0 = m.Parameter(0);
401 Node* const p1 = m.Parameter(1);
402 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
403 m.Return(m.Int32Add(p0, s0));
404 Stream s = m.Build();
405 ASSERT_EQ(1U, s.size());
406 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
407 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
408 ASSERT_EQ(2U, s[0]->InputCount());
409 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
410 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
414 TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
415 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
416 Node* const p0 = m.Parameter(0);
417 Node* const p1 = m.Parameter(1);
418 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
419 m.Return(m.Int32Add(s0, p0));
420 Stream s = m.Build();
421 ASSERT_EQ(1U, s.size());
422 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
423 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
424 ASSERT_EQ(2U, s[0]->InputCount());
425 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
426 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
430 TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
431 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
432 Node* const p0 = m.Parameter(0);
433 Node* const p1 = m.Parameter(1);
434 Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
435 m.Return(m.Int32Add(p0, s0));
436 Stream s = m.Build();
437 ASSERT_EQ(1U, s.size());
438 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
439 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
440 ASSERT_EQ(2U, s[0]->InputCount());
441 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
442 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
446 TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
447 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
448 Node* const p0 = m.Parameter(0);
449 Node* const p1 = m.Parameter(1);
450 Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
451 m.Return(m.Int32Add(s0, p0));
452 Stream s = m.Build();
453 ASSERT_EQ(1U, s.size());
454 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
455 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
456 ASSERT_EQ(2U, s[0]->InputCount());
457 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
458 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
462 TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
463 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
464 Node* const p0 = m.Parameter(0);
465 Node* const p1 = m.Parameter(1);
466 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
467 m.Return(m.Int32Add(p0, s0));
468 Stream s = m.Build();
469 ASSERT_EQ(1U, s.size());
470 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
471 EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
472 ASSERT_EQ(2U, s[0]->InputCount());
473 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
474 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
478 TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) {
479 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
480 Node* const p0 = m.Parameter(0);
481 Node* const p1 = m.Parameter(1);
482 Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
483 m.Return(m.Int32Add(p0, s0));
484 Stream s = m.Build();
485 ASSERT_EQ(1U, s.size());
486 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
487 EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
488 ASSERT_EQ(2U, s[0]->InputCount());
489 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
490 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
494 TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) {
495 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
496 Node* const p0 = m.Parameter(0);
497 Node* const p1 = m.Parameter(1);
498 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
499 m.Return(m.Int32Add(p0, s0));
500 Stream s = m.Build();
501 ASSERT_EQ(1U, s.size());
502 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
503 EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
504 ASSERT_EQ(2U, s[0]->InputCount());
505 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
506 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
510 TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) {
511 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
512 Node* const p0 = m.Parameter(0);
513 Node* const p1 = m.Parameter(1);
514 Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
515 m.Return(m.Int32Add(p0, s0));
516 Stream s = m.Build();
517 ASSERT_EQ(1U, s.size());
518 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
519 EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
520 ASSERT_EQ(2U, s[0]->InputCount());
521 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
522 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
526 TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) {
527 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
528 Node* const p0 = m.Parameter(0);
529 Node* const p1 = m.Parameter(1);
530 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
531 Node* const c0 = m.Int32Constant(15);
532 m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
533 Stream s = m.Build();
534 ASSERT_EQ(1U, s.size());
535 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
536 EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
537 ASSERT_EQ(3U, s[0]->InputCount());
538 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
539 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
540 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
544 TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) {
545 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
546 Node* const p0 = m.Parameter(0);
547 Node* const p1 = m.Parameter(1);
548 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
549 Node* const c0 = m.Int32Constant(15);
550 m.Return(m.Int32Add(p0, m.Int32Add(s0, c0)));
551 Stream s = m.Build();
552 ASSERT_EQ(1U, s.size());
553 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
554 EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
555 ASSERT_EQ(3U, s[0]->InputCount());
556 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
557 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
558 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
562 TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) {
563 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
564 Node* const p0 = m.Parameter(0);
565 Node* const p1 = m.Parameter(1);
566 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
567 Node* const c0 = m.Int32Constant(15);
568 m.Return(m.Int32Add(s0, m.Int32Add(c0, p0)));
569 Stream s = m.Build();
570 ASSERT_EQ(1U, s.size());
571 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
572 EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
573 ASSERT_EQ(3U, s[0]->InputCount());
574 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
575 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
576 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
580 TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) {
581 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
582 Node* const p0 = m.Parameter(0);
583 Node* const p1 = m.Parameter(1);
584 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
585 Node* const c0 = m.Int32Constant(15);
586 m.Return(m.Int32Add(m.Int32Add(s0, c0), p0));
587 Stream s = m.Build();
588 ASSERT_EQ(1U, s.size());
589 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
590 EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
591 ASSERT_EQ(3U, s[0]->InputCount());
592 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
593 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
594 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
598 TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) {
599 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
600 Node* const p0 = m.Parameter(0);
601 Node* const p1 = m.Parameter(1);
602 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
603 Node* const c0 = m.Int32Constant(15);
604 m.Return(m.Int32Add(m.Int32Add(c0, p0), s0));
605 Stream s = m.Build();
606 ASSERT_EQ(1U, s.size());
607 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
608 EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
609 ASSERT_EQ(3U, s[0]->InputCount());
610 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
611 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
612 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
616 TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) {
617 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
618 Node* const p0 = m.Parameter(0);
619 Node* const p1 = m.Parameter(1);
620 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
621 Node* const c0 = m.Int32Constant(15);
622 m.Return(m.Int32Add(m.Int32Add(p0, s0), c0));
623 Stream s = m.Build();
624 ASSERT_EQ(1U, s.size());
625 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
626 EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
627 ASSERT_EQ(3U, s[0]->InputCount());
628 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
629 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
630 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
634 TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) {
635 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
636 Node* const p0 = m.Parameter(0);
637 Node* const p1 = m.Parameter(1);
638 Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
639 Node* const c0 = m.Int32Constant(15);
640 m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
641 Stream s = m.Build();
642 ASSERT_EQ(1U, s.size());
643 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
644 EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
645 ASSERT_EQ(3U, s[0]->InputCount());
646 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
647 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
648 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
652 TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) {
653 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
654 Node* const p0 = m.Parameter(0);
655 Node* const p1 = m.Parameter(1);
656 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
657 Node* const c0 = m.Int32Constant(15);
658 m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
659 Stream s = m.Build();
660 ASSERT_EQ(1U, s.size());
661 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
662 EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
663 ASSERT_EQ(3U, s[0]->InputCount());
664 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
665 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
666 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
670 TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
671 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
672 Node* const p0 = m.Parameter(0);
673 Node* const p1 = m.Parameter(1);
674 Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
675 Node* const c0 = m.Int32Constant(15);
676 m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
677 Stream s = m.Build();
678 ASSERT_EQ(1U, s.size());
679 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
680 EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
681 ASSERT_EQ(3U, s[0]->InputCount());
682 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
683 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
684 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
688 TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
689 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
690 Node* const p0 = m.Parameter(0);
691 Node* const p1 = m.Parameter(1);
692 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
693 Node* const c0 = m.Int32Constant(15);
694 m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
695 Stream s = m.Build();
696 ASSERT_EQ(1U, s.size());
697 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
698 EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
699 ASSERT_EQ(3U, s[0]->InputCount());
700 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
701 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
702 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
706 TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
707 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
708 Node* const p0 = m.Parameter(0);
709 Node* const p1 = m.Parameter(1);
710 Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
711 Node* const c0 = m.Int32Constant(15);
712 m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
713 Stream s = m.Build();
714 ASSERT_EQ(1U, s.size());
715 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
716 EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
717 ASSERT_EQ(3U, s[0]->InputCount());
718 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
719 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
720 EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
724 TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) {
725 StreamBuilder m(this, kMachInt32, kMachInt32);
726 Node* const p0 = m.Parameter(0);
727 Node* const c0 = m.Int32Constant(-1);
728 // If there is only a single use of on of the sub's non-constant input, use a
729 // "subl" instruction.
730 m.Return(m.Int32Sub(p0, c0));
731 Stream s = m.Build();
732 ASSERT_EQ(1U, s.size());
733 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
734 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
735 ASSERT_EQ(2U, s[0]->InputCount());
736 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
737 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
741 TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) {
742 StreamBuilder m(this, kMachInt32, kMachInt32);
743 Node* const p0 = m.Parameter(0);
744 Node* const c0 = m.Int32Constant(-1);
745 // If there are multiple uses of on of the sub's non-constant input, use a
746 // "leal" instruction.
747 Node* const v0 = m.Int32Sub(p0, c0);
748 m.Return(m.Int32Div(p0, v0));
749 Stream s = m.Build();
750 ASSERT_EQ(2U, s.size());
751 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
752 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
753 ASSERT_EQ(2U, s[0]->InputCount());
754 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
755 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
759 TEST_F(InstructionSelectorTest, Int32AddScaled2Other) {
760 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
761 Node* const p0 = m.Parameter(0);
762 Node* const p1 = m.Parameter(1);
763 Node* const p2 = m.Parameter(2);
764 Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
765 Node* const a0 = m.Int32Add(s0, p2);
766 Node* const a1 = m.Int32Add(p0, a0);
768 Stream s = m.Build();
769 ASSERT_EQ(2U, s.size());
770 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
771 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
772 ASSERT_EQ(2U, s[0]->InputCount());
773 EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
774 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
775 EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[0]->OutputAt(0)));
776 ASSERT_EQ(2U, s[1]->InputCount());
777 EXPECT_EQ(kX64Lea32, s[1]->arch_opcode());
778 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
779 EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[1]->InputAt(1)));
780 EXPECT_EQ(s.ToVreg(a1), s.ToVreg(s[1]->OutputAt(0)));
784 // -----------------------------------------------------------------------------
788 TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) {
789 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
790 Node* const p0 = m.Parameter(0);
791 Node* const p1 = m.Parameter(1);
792 Node* const m0 = m.Int32Mul(p0, p1);
793 m.Return(m.Int32Mul(m0, p0));
794 Stream s = m.Build();
795 ASSERT_EQ(2U, s.size());
796 EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
797 ASSERT_EQ(2U, s[0]->InputCount());
798 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
799 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
800 ASSERT_EQ(1U, s[0]->OutputCount());
801 EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[0]->OutputAt(0)));
802 EXPECT_EQ(kX64Imul32, s[1]->arch_opcode());
803 ASSERT_EQ(2U, s[1]->InputCount());
804 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
805 EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[1]->InputAt(1)));
809 TEST_F(InstructionSelectorTest, Int32MulHigh) {
810 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
811 Node* const p0 = m.Parameter(0);
812 Node* const p1 = m.Parameter(1);
813 Node* const n = m.Int32MulHigh(p0, p1);
815 Stream s = m.Build();
816 ASSERT_EQ(1U, s.size());
817 EXPECT_EQ(kX64ImulHigh32, s[0]->arch_opcode());
818 ASSERT_EQ(2U, s[0]->InputCount());
819 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
820 EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax));
821 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
822 EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1)));
823 ASSERT_LE(1U, s[0]->OutputCount());
824 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
825 EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx));
829 TEST_F(InstructionSelectorTest, Uint32MulHigh) {
830 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
831 Node* const p0 = m.Parameter(0);
832 Node* const p1 = m.Parameter(1);
833 Node* const n = m.Uint32MulHigh(p0, p1);
835 Stream s = m.Build();
836 ASSERT_EQ(1U, s.size());
837 EXPECT_EQ(kX64UmulHigh32, s[0]->arch_opcode());
838 ASSERT_EQ(2U, s[0]->InputCount());
839 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
840 EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax));
841 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
842 EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1)));
843 ASSERT_LE(1U, s[0]->OutputCount());
844 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
845 EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx));
849 TEST_F(InstructionSelectorTest, Int32Mul2BecomesLea) {
850 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
851 Node* const p0 = m.Parameter(0);
852 Node* const c1 = m.Int32Constant(2);
853 Node* const n = m.Int32Mul(p0, c1);
855 Stream s = m.Build();
856 ASSERT_EQ(1U, s.size());
857 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
858 EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
859 ASSERT_EQ(2U, s[0]->InputCount());
860 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
861 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
865 TEST_F(InstructionSelectorTest, Int32Mul3BecomesLea) {
866 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
867 Node* const p0 = m.Parameter(0);
868 Node* const c1 = m.Int32Constant(3);
869 Node* const n = m.Int32Mul(p0, c1);
871 Stream s = m.Build();
872 ASSERT_EQ(1U, s.size());
873 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
874 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
875 ASSERT_EQ(2U, s[0]->InputCount());
876 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
877 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
881 TEST_F(InstructionSelectorTest, Int32Mul4BecomesLea) {
882 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
883 Node* const p0 = m.Parameter(0);
884 Node* const c1 = m.Int32Constant(4);
885 Node* const n = m.Int32Mul(p0, c1);
887 Stream s = m.Build();
888 ASSERT_EQ(1U, s.size());
889 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
890 EXPECT_EQ(kMode_M4, s[0]->addressing_mode());
891 ASSERT_EQ(1U, s[0]->InputCount());
892 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
896 TEST_F(InstructionSelectorTest, Int32Mul5BecomesLea) {
897 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
898 Node* const p0 = m.Parameter(0);
899 Node* const c1 = m.Int32Constant(5);
900 Node* const n = m.Int32Mul(p0, c1);
902 Stream s = m.Build();
903 ASSERT_EQ(1U, s.size());
904 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
905 EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
906 ASSERT_EQ(2U, s[0]->InputCount());
907 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
908 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
912 TEST_F(InstructionSelectorTest, Int32Mul8BecomesLea) {
913 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
914 Node* const p0 = m.Parameter(0);
915 Node* const c1 = m.Int32Constant(8);
916 Node* const n = m.Int32Mul(p0, c1);
918 Stream s = m.Build();
919 ASSERT_EQ(1U, s.size());
920 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
921 EXPECT_EQ(kMode_M8, s[0]->addressing_mode());
922 ASSERT_EQ(1U, s[0]->InputCount());
923 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
927 TEST_F(InstructionSelectorTest, Int32Mul9BecomesLea) {
928 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
929 Node* const p0 = m.Parameter(0);
930 Node* const c1 = m.Int32Constant(9);
931 Node* const n = m.Int32Mul(p0, c1);
933 Stream s = m.Build();
934 ASSERT_EQ(1U, s.size());
935 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
936 EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
937 ASSERT_EQ(2U, s[0]->InputCount());
938 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
939 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
// -----------------------------------------------------------------------------
// Word32Shl.
947 TEST_F(InstructionSelectorTest, Int32Shl1BecomesLea) {
948 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
949 Node* const p0 = m.Parameter(0);
950 Node* const c1 = m.Int32Constant(1);
951 Node* const n = m.Word32Shl(p0, c1);
953 Stream s = m.Build();
954 ASSERT_EQ(1U, s.size());
955 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
956 EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
957 ASSERT_EQ(2U, s[0]->InputCount());
958 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
959 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
963 TEST_F(InstructionSelectorTest, Int32Shl2BecomesLea) {
964 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
965 Node* const p0 = m.Parameter(0);
966 Node* const c1 = m.Int32Constant(2);
967 Node* const n = m.Word32Shl(p0, c1);
969 Stream s = m.Build();
970 ASSERT_EQ(1U, s.size());
971 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
972 EXPECT_EQ(kMode_M4, s[0]->addressing_mode());
973 ASSERT_EQ(1U, s[0]->InputCount());
974 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
978 TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
979 StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
980 Node* const p0 = m.Parameter(0);
981 Node* const c1 = m.Int32Constant(3);
982 Node* const n = m.Word32Shl(p0, c1);
984 Stream s = m.Build();
985 ASSERT_EQ(1U, s.size());
986 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
987 EXPECT_EQ(kMode_M8, s[0]->addressing_mode());
988 ASSERT_EQ(1U, s[0]->InputCount());
989 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
993 // -----------------------------------------------------------------------------
994 // Floating point operations.
997 TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
999 StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
1000 Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
1001 Node* mul = m.Float64Mul(add, m.Parameter(1));
1002 Node* sub = m.Float64Sub(mul, add);
1003 Node* ret = m.Float64Div(mul, sub);
1005 Stream s = m.Build(AVX);
1006 ASSERT_EQ(4U, s.size());
1007 EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
1008 EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
1009 EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
1010 EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
1013 StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
1014 Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
1015 Node* mul = m.Float64Mul(add, m.Parameter(1));
1016 Node* sub = m.Float64Sub(mul, add);
1017 Node* ret = m.Float64Div(mul, sub);
1019 Stream s = m.Build();
1020 ASSERT_EQ(4U, s.size());
1021 EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
1022 EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
1023 EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
1024 EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
1029 // -----------------------------------------------------------------------------
1033 TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
1034 TRACED_FORRANGE(int64_t, x, 32, 63) {
1035 StreamBuilder m(this, kMachInt64, kMachInt32);
1036 Node* const p0 = m.Parameter(0);
1037 Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
1039 Stream s = m.Build();
1040 ASSERT_EQ(1U, s.size());
1041 EXPECT_EQ(kX64Shl, s[0]->arch_opcode());
1042 ASSERT_EQ(2U, s[0]->InputCount());
1043 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
1044 EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1)));
1045 ASSERT_EQ(1U, s[0]->OutputCount());
1046 EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
1047 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
1052 TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
1053 TRACED_FORRANGE(int64_t, x, 32, 63) {
1054 StreamBuilder m(this, kMachInt64, kMachUint32);
1055 Node* const p0 = m.Parameter(0);
1056 Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
1058 Stream s = m.Build();
1059 ASSERT_EQ(1U, s.size());
1060 EXPECT_EQ(kX64Shl, s[0]->arch_opcode());
1061 ASSERT_EQ(2U, s[0]->InputCount());
1062 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
1063 EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1)));
1064 ASSERT_EQ(1U, s[0]->OutputCount());
1065 EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
1066 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
1071 TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
1073 StreamBuilder m(this, kMachInt32, kMachInt32);
1074 Node* const p0 = m.Parameter(0);
1075 Node* const n = m.Word32And(p0, m.Int32Constant(0xff));
1077 Stream s = m.Build();
1078 ASSERT_EQ(1U, s.size());
1079 EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
1080 ASSERT_EQ(1U, s[0]->InputCount());
1081 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
1082 ASSERT_EQ(1U, s[0]->OutputCount());
1083 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
1086 StreamBuilder m(this, kMachInt32, kMachInt32);
1087 Node* const p0 = m.Parameter(0);
1088 Node* const n = m.Word32And(m.Int32Constant(0xff), p0);
1090 Stream s = m.Build();
1091 ASSERT_EQ(1U, s.size());
1092 EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
1093 ASSERT_EQ(1U, s[0]->InputCount());
1094 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
1095 ASSERT_EQ(1U, s[0]->OutputCount());
1096 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
1101 TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
1103 StreamBuilder m(this, kMachInt32, kMachInt32);
1104 Node* const p0 = m.Parameter(0);
1105 Node* const n = m.Word32And(p0, m.Int32Constant(0xffff));
1107 Stream s = m.Build();
1108 ASSERT_EQ(1U, s.size());
1109 EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
1110 ASSERT_EQ(1U, s[0]->InputCount());
1111 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
1112 ASSERT_EQ(1U, s[0]->OutputCount());
1113 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
1116 StreamBuilder m(this, kMachInt32, kMachInt32);
1117 Node* const p0 = m.Parameter(0);
1118 Node* const n = m.Word32And(m.Int32Constant(0xffff), p0);
1120 Stream s = m.Build();
1121 ASSERT_EQ(1U, s.size());
1122 EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
1123 ASSERT_EQ(1U, s[0]->InputCount());
1124 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
1125 ASSERT_EQ(1U, s[0]->OutputCount());
1126 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
1130 } // namespace compiler
1131 } // namespace internal