1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <iostream> // NOLINT(readability/streams)
32 #include "src/disassembler.h"
33 #include "src/factory.h"
34 #include "src/macro-assembler.h"
35 #include "src/mips64/macro-assembler-mips64.h"
36 #include "src/mips64/simulator-mips64.h"
38 #include "test/cctest/cctest.h"
40 using namespace v8::internal;
43 // Define these function prototypes to match JSEntryFunction in execution.cc.
44 typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
45 typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
46 typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
53 CcTest::InitializeVM();
54 Isolate* isolate = CcTest::i_isolate();
55 HandleScope scope(isolate);
57 MacroAssembler assm(isolate, NULL, 0);
66 Handle<Code> code = isolate->factory()->NewCode(
67 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
68 F2 f = FUNCTION_CAST<F2>(code->entry());
70 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
71 CHECK_EQ(0xabcL, res);
76 CcTest::InitializeVM();
77 Isolate* isolate = CcTest::i_isolate();
78 HandleScope scope(isolate);
80 MacroAssembler assm(isolate, NULL, 0);
94 __ Branch(&L, ne, v1, Operand((int64_t)0));
102 Handle<Code> code = isolate->factory()->NewCode(
103 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
104 F1 f = FUNCTION_CAST<F1>(code->entry());
106 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
107 CHECK_EQ(1275L, res);
112 CcTest::InitializeVM();
113 Isolate* isolate = CcTest::i_isolate();
114 HandleScope scope(isolate);
116 MacroAssembler assm(isolate, NULL, 0);
120 // ----- Test all instructions.
122 // Test lui, ori, and addiu, used in the li pseudo-instruction.
123 // This way we can then safely load registers with chosen values.
125 __ ori(a4, zero_reg, 0);
128 __ ori(a4, a4, 0x0f0f);
129 __ ori(a4, a4, 0xf0f0);
131 __ addiu(a6, a5, -0x10);
133 // Load values in temporary registers.
134 __ li(a4, 0x00000004);
135 __ li(a5, 0x00001234);
136 __ li(a6, 0x12345678);
137 __ li(a7, 0x7fffffff);
138 __ li(t0, 0xfffffffc);
139 __ li(t1, 0xffffedcc);
140 __ li(t2, 0xedcba988);
141 __ li(t3, 0x80000000);
144 __ srl(v0, a6, 8); // 0x00123456
145 __ sll(v0, v0, 11); // 0x91a2b000
146 __ sra(v0, v0, 3); // 0xf2345600
147 __ srav(v0, v0, a4); // 0xff234560
148 __ sllv(v0, v0, a4); // 0xf2345600
149 __ srlv(v0, v0, a4); // 0x0f234560
150 __ Branch(&error, ne, v0, Operand(0x0f234560));
153 __ addu(v0, a4, a5); // 0x00001238
154 __ subu(v0, v0, a4); // 0x00001234
155 __ Branch(&error, ne, v0, Operand(0x00001234));
157 __ addu(v1, a7, a4); // 32bit addu result is sign-extended into 64bit reg.
158 __ Branch(&error, ne, v1, Operand(0xffffffff80000003));
160 __ subu(v1, t3, a4); // 0x7ffffffc
161 __ Branch(&error, ne, v1, Operand(0x7ffffffc));
164 __ and_(v0, a5, a6); // 0x0000000000001230
165 __ or_(v0, v0, a5); // 0x0000000000001234
166 __ xor_(v0, v0, a6); // 0x000000001234444c
167 __ nor(v0, v0, a6); // 0xffffffffedcba987
168 __ Branch(&error, ne, v0, Operand(0xffffffffedcba983));
171 // Shift both 32bit number to left, to preserve meaning of next comparison.
172 __ dsll32(a7, a7, 0);
173 __ dsll32(t3, t3, 0);
176 __ Branch(&error, ne, v0, Operand(0x1));
179 __ Branch(&error, ne, v0, Operand(zero_reg));
182 // Restore original values in registers.
183 __ dsrl32(a7, a7, 0);
184 __ dsrl32(t3, t3, 0);
185 // End of SPECIAL class.
187 __ addiu(v0, zero_reg, 0x7421); // 0x00007421
188 __ addiu(v0, v0, -0x1); // 0x00007420
189 __ addiu(v0, v0, -0x20); // 0x00007400
190 __ Branch(&error, ne, v0, Operand(0x00007400));
192 __ addiu(v1, a7, 0x1); // 0x80000000 - result is sign-extended.
193 __ Branch(&error, ne, v1, Operand(0xffffffff80000000));
196 __ slti(v0, a5, 0x00002000); // 0x1
197 __ slti(v0, v0, 0xffff8000); // 0x0
198 __ Branch(&error, ne, v0, Operand(zero_reg));
200 __ sltiu(v0, a5, 0x00002000); // 0x1
201 __ sltiu(v0, v0, 0x00008000); // 0x1
202 __ Branch(&error, ne, v0, Operand(0x1));
205 __ andi(v0, a5, 0xf0f0); // 0x00001030
206 __ ori(v0, v0, 0x8a00); // 0x00009a30
207 __ xori(v0, v0, 0x83cc); // 0x000019fc
208 __ Branch(&error, ne, v0, Operand(0x000019fc));
210 __ lui(v1, 0x8123); // Result is sign-extended into 64bit register.
211 __ Branch(&error, ne, v1, Operand(0xffffffff81230000));
214 // Bit twiddling instructions & conditional moves.
215 // Uses a4-t3 as set above.
216 __ Clz(v0, a4); // 29
217 __ Clz(v1, a5); // 19
218 __ addu(v0, v0, v1); // 48
220 __ addu(v0, v0, v1); // 51
222 __ addu(v0, v0, v1); // 51
223 __ Branch(&error, ne, v0, Operand(51));
224 __ Movn(a0, a7, a4); // Move a0<-a7 (a4 is NOT 0).
225 __ Ins(a0, a5, 12, 8); // 0x7ff34fff
226 __ Branch(&error, ne, a0, Operand(0x7ff34fff));
227 __ Movz(a0, t2, t3); // a0 not updated (t3 is NOT 0).
228 __ Ext(a1, a0, 8, 12); // 0x34f
229 __ Branch(&error, ne, a1, Operand(0x34f));
230 __ Movz(a0, t2, v1); // a0<-t2, v0 is 0, from 8 instr back.
231 __ Branch(&error, ne, a0, Operand(t2));
233 // Everything was correctly executed. Load the expected result.
234 __ li(v0, 0x31415926);
239 // Got an error. Return a wrong result.
248 Handle<Code> code = isolate->factory()->NewCode(
249 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
250 F2 f = FUNCTION_CAST<F2>(code->entry());
252 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
254 CHECK_EQ(0x31415926L, res);
259 // Test floating point instructions.
260 CcTest::InitializeVM();
261 Isolate* isolate = CcTest::i_isolate();
262 HandleScope scope(isolate);
277 // Create a function that accepts &t, and loads, manipulates, and stores
278 // the doubles t.a ... t.f.
279 MacroAssembler assm(isolate, NULL, 0);
282 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
283 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
284 __ add_d(f8, f4, f6);
285 __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
287 __ mov_d(f10, f8); // c
288 __ neg_d(f12, f6); // -b
289 __ sub_d(f10, f10, f12);
290 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
292 __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
296 __ cvt_d_w(f14, f14); // f14 = 120.0.
297 __ mul_d(f10, f10, f14);
298 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
300 __ div_d(f12, f10, f4);
301 __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
304 __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
305 // g = sqrt(f) = 10.97451593465515908537
307 if (kArchVariant == kMips64r2) {
308 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
309 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
310 __ madd_d(f14, f6, f4, f6);
311 __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
319 Handle<Code> code = isolate->factory()->NewCode(
320 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
321 F3 f = FUNCTION_CAST<F3>(code->entry());
330 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
332 CHECK_EQ(1.5e14, t.a);
333 CHECK_EQ(1.5e14, t.b);
334 CHECK_EQ(1.50275e14, t.c);
335 CHECK_EQ(1.50550e14, t.d);
336 CHECK_EQ(1.8066e16, t.e);
337 CHECK_EQ(120.44, t.f);
338 CHECK_EQ(10.97451593465515908537, t.g);
339 if (kArchVariant == kMips64r2) {
340 CHECK_EQ(6.875, t.h);
346 // Test moves between floating point and integer registers.
347 CcTest::InitializeVM();
348 Isolate* isolate = CcTest::i_isolate();
349 HandleScope scope(isolate);
361 Assembler assm(isolate, NULL, 0);
364 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
365 __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)));
367 // Swap f4 and f5, by using 3 integer registers, a4-a6,
368 // both two 32-bit chunks, and one 64-bit chunk.
369 // mXhc1 is mips32/64-r2 only, not r1,
370 // but we will not support r1 in practice.
379 // Store the swapped f4 and f5 back to memory.
380 __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
381 __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)));
383 // Test sign extension of move operations from coprocessor.
384 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, d)));
388 __ sd(a4, MemOperand(a0, OFFSET_OF(T, high)));
389 __ sd(a5, MemOperand(a0, OFFSET_OF(T, low)));
396 Handle<Code> code = isolate->factory()->NewCode(
397 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
398 F3 f = FUNCTION_CAST<F3>(code->entry());
403 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
406 CHECK_EQ(2.75e11, t.a);
407 CHECK_EQ(2.75e11, t.b);
408 CHECK_EQ(1.5e22, t.c);
409 CHECK_EQ(static_cast<int64_t>(0xffffffffc25001d1L), t.high);
410 CHECK_EQ(static_cast<int64_t>(0xffffffffbf800000L), t.low);
415 // Test conversions between doubles and integers.
416 CcTest::InitializeVM();
417 Isolate* isolate = CcTest::i_isolate();
418 HandleScope scope(isolate);
428 Assembler assm(isolate, NULL, 0);
431 // Load all structure elements to registers.
432 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
433 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
434 __ lw(a4, MemOperand(a0, OFFSET_OF(T, i)) );
435 __ lw(a5, MemOperand(a0, OFFSET_OF(T, j)) );
437 // Convert double in f4 to int in element i.
440 __ sw(a6, MemOperand(a0, OFFSET_OF(T, i)) );
442 // Convert double in f6 to int in element j.
445 __ sw(a7, MemOperand(a0, OFFSET_OF(T, j)) );
447 // Convert int in original i (a4) to double in a.
450 __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
452 // Convert int in original j (a5) to double in b.
455 __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
462 Handle<Code> code = isolate->factory()->NewCode(
463 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
464 F3 f = FUNCTION_CAST<F3>(code->entry());
469 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
472 CHECK_EQ(12345678.0, t.a);
473 CHECK_EQ(-100000.0, t.b);
474 CHECK_EQ(15000, t.i);
475 CHECK_EQ(275000000, t.j);
480 // Test simple memory loads and stores.
481 CcTest::InitializeVM();
482 Isolate* isolate = CcTest::i_isolate();
483 HandleScope scope(isolate);
497 Assembler assm(isolate, NULL, 0);
500 // Basic word load/store.
501 __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
502 __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
504 // lh with positive data.
505 __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
506 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
508 // lh with negative data.
509 __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
510 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
512 // lhu with negative data.
513 __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
514 __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
516 // lb with negative data.
517 __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
518 __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
520 // sh writes only 1/2 of word.
522 __ ori(t1, t1, 0x3333);
523 __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
524 __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
525 __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
532 Handle<Code> code = isolate->factory()->NewCode(
533 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
534 F3 f = FUNCTION_CAST<F3>(code->entry());
537 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
540 CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
541 CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
542 CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
543 CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
544 CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
545 CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
550 // Test floating point compare and branch instructions.
551 CcTest::InitializeVM();
552 Isolate* isolate = CcTest::i_isolate();
553 HandleScope scope(isolate);
566 // Create a function that accepts &t, and loads, manipulates, and stores
567 // the doubles t.a ... t.f.
568 MacroAssembler assm(isolate, NULL, 0);
569 Label neither_is_nan, less_than, outa_here;
571 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
572 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
573 if (kArchVariant != kMips64r6) {
575 __ bc1f(&neither_is_nan);
577 __ cmp(UN, L, f2, f4, f6);
578 __ bc1eqz(&neither_is_nan, f2);
581 __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
582 __ Branch(&outa_here);
584 __ bind(&neither_is_nan);
586 if (kArchVariant == kMips64r6) {
587 __ cmp(OLT, L, f2, f6, f4);
588 __ bc1nez(&less_than, f2);
590 __ c(OLT, D, f6, f4, 2);
591 __ bc1t(&less_than, 2);
595 __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
596 __ Branch(&outa_here);
599 __ Addu(a4, zero_reg, Operand(1));
600 __ sw(a4, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
603 // This test-case should have additional tests.
612 Handle<Code> code = isolate->factory()->NewCode(
613 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
614 F3 f = FUNCTION_CAST<F3>(code->entry());
622 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
624 CHECK_EQ(1.5e14, t.a);
625 CHECK_EQ(2.75e11, t.b);
626 CHECK_EQ(1, t.result);
631 // Test ROTR and ROTRV instructions.
632 CcTest::InitializeVM();
633 Isolate* isolate = CcTest::i_isolate();
634 HandleScope scope(isolate);
638 int32_t result_rotr_4;
639 int32_t result_rotr_8;
640 int32_t result_rotr_12;
641 int32_t result_rotr_16;
642 int32_t result_rotr_20;
643 int32_t result_rotr_24;
644 int32_t result_rotr_28;
645 int32_t result_rotrv_4;
646 int32_t result_rotrv_8;
647 int32_t result_rotrv_12;
648 int32_t result_rotrv_16;
649 int32_t result_rotrv_20;
650 int32_t result_rotrv_24;
651 int32_t result_rotrv_28;
655 MacroAssembler assm(isolate, NULL, 0);
658 __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );
660 // ROTR instruction (called through the Ror macro).
661 __ Ror(a5, a4, 0x0004);
662 __ Ror(a6, a4, 0x0008);
663 __ Ror(a7, a4, 0x000c);
664 __ Ror(t0, a4, 0x0010);
665 __ Ror(t1, a4, 0x0014);
666 __ Ror(t2, a4, 0x0018);
667 __ Ror(t3, a4, 0x001c);
670 __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
671 __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
672 __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
673 __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
674 __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
675 __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
676 __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
678 // ROTRV instruction (called through the Ror macro).
695 __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
696 __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
697 __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
698 __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
699 __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
700 __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
701 __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
708 Handle<Code> code = isolate->factory()->NewCode(
709 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
710 F3 f = FUNCTION_CAST<F3>(code->entry());
711 t.input = 0x12345678;
712 Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
714 CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
715 CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
716 CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
717 CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
718 CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
719 CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
720 CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
722 CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotrv_4);
723 CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotrv_8);
724 CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotrv_12);
725 CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotrv_16);
726 CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotrv_20);
727 CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotrv_24);
728 CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotrv_28);
733 // Test BRANCH improvements.
734 CcTest::InitializeVM();
735 Isolate* isolate = CcTest::i_isolate();
736 HandleScope scope(isolate);
738 MacroAssembler assm(isolate, NULL, 0);
739 Label exit, exit2, exit3;
741 __ Branch(&exit, ge, a0, Operand(zero_reg));
742 __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
743 __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
753 isolate->factory()->NewCode(
754 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
759 // Test conversions between doubles and long integers.
760 // Test how the long ints map to FP reg pairs.
761 CcTest::InitializeVM();
762 Isolate* isolate = CcTest::i_isolate();
763 HandleScope scope(isolate);
773 int64_t long_as_int64;
776 int64_t b_long_as_int64;
780 Assembler assm(isolate, NULL, 0);
783 if (kArchVariant == kMips64r2) {
784 // Rewritten for FR=1 FPU mode:
785 // - 32 FP regs of 64-bits each, no odd/even pairs.
786 // - Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode.
787 // Load all structure elements to registers.
788 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
790 // Save the raw bits of the double.
793 __ sw(a4, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
794 __ sw(a5, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
796 // Convert double in f0 to long, save hi/lo parts.
798 __ mfc1(a4, f0); // f0 LS 32 bits of long.
799 __ mfhc1(a5, f0); // f0 MS 32 bits of long.
800 __ sw(a4, MemOperand(a0, OFFSET_OF(T, long_lo)));
801 __ sw(a5, MemOperand(a0, OFFSET_OF(T, long_hi)));
803 // Combine the high/low ints, convert back to double.
804 __ dsll32(a6, a5, 0); // Move a5 to high bits of a6.
808 __ sdc1(f1, MemOperand(a0, OFFSET_OF(T, a_converted)));
811 // Convert the b long integers to double b.
812 __ lw(a4, MemOperand(a0, OFFSET_OF(T, b_long_lo)));
813 __ lw(a5, MemOperand(a0, OFFSET_OF(T, b_long_hi)));
814 __ mtc1(a4, f8); // f8 LS 32-bits.
815 __ mthc1(a5, f8); // f8 MS 32-bits.
817 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
819 // Convert double b back to long-int.
820 __ ldc1(f31, MemOperand(a0, OFFSET_OF(T, b)));
821 __ cvt_l_d(f31, f31);
823 __ sd(a7, MemOperand(a0, OFFSET_OF(T, b_long_as_int64)));
831 Handle<Code> code = isolate->factory()->NewCode(
832 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
833 F3 f = FUNCTION_CAST<F3>(code->entry());
834 t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
835 t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
836 t.b_long_lo = 0x00ff00ff;
837 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
840 CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
841 CHECK_EQ(static_cast<int32_t>(0xFFC00000), t.dbl_mant);
842 CHECK_EQ(0, t.long_hi);
843 CHECK_EQ(static_cast<int32_t>(0x7fffffff), t.long_lo);
844 CHECK_EQ(2.147483647e9, t.a_converted);
846 // 0xFF00FF00FF -> 1.095233372415e12.
847 CHECK_EQ(1.095233372415e12, t.b);
848 CHECK_EQ(static_cast<int64_t>(0xFF00FF00FF), t.b_long_as_int64);
854 // Do not run test on MIPS64r6, as these instructions are removed.
855 if (kArchVariant != kMips64r6) {
856 // Test LWL, LWR, SWL and SWR instructions.
857 CcTest::InitializeVM();
858 Isolate* isolate = CcTest::i_isolate();
859 HandleScope scope(isolate);
883 Assembler assm(isolate, NULL, 0);
885 // Test all combinations of LWL and vAddr.
886 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
887 __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
888 __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)));
890 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
891 __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
892 __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)));
894 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
895 __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
896 __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)));
898 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
899 __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
900 __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)));
902 // Test all combinations of LWR and vAddr.
903 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
904 __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
905 __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)));
907 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
908 __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
909 __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)));
911 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
912 __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
913 __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
915 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
916 __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
917 __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
919 // Test all combinations of SWL and vAddr.
920 __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
921 __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
922 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
923 __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
925 __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
926 __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)));
927 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
928 __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1));
930 __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
931 __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)));
932 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
933 __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2));
935 __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
936 __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)));
937 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
938 __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3));
940 // Test all combinations of SWR and vAddr.
941 __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
942 __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
943 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
944 __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
946 __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
947 __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)));
948 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
949 __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1));
951 __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
952 __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)));
953 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
954 __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2));
956 __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
957 __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)));
958 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
959 __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3));
966 Handle<Code> code = isolate->factory()->NewCode(
967 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
968 F3 f = FUNCTION_CAST<F3>(code->entry());
969 t.reg_init = 0xaabbccdd;
970 t.mem_init = 0x11223344;
972 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
975 CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
976 CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
977 CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
978 CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
980 CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
981 CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
982 CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
983 CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
985 CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
986 CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
987 CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
988 CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
990 CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
991 CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
992 CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
993 CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
999 CcTest::InitializeVM();
1000 Isolate* isolate = CcTest::i_isolate();
1001 HandleScope scope(isolate);
1013 MacroAssembler assm(isolate, NULL, 0);
1015 __ mov(t2, fp); // Save frame pointer.
1016 __ mov(fp, a0); // Access struct T by fp.
1017 __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)));
1018 __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)));
1020 __ addu(a5, a4, a7);
1021 __ subu(t0, a4, a7);
1023 __ push(a4); // These instructions disappear after opt.
1025 __ addu(a4, a4, a4);
1027 __ Pop(); // These instructions disappear after opt.
1030 __ push(a7); // These instructions disappear after opt.
1036 __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
1037 __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)));
1039 __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
1040 __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
1043 __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
1047 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1051 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1055 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1059 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1068 assm.GetCode(&desc);
1069 Handle<Code> code = isolate->factory()->NewCode(
1070 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1071 F3 f = FUNCTION_CAST<F3>(code->entry());
1079 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1087 // Test Cvt_d_uw and Trunc_uw_d macros.
1088 CcTest::InitializeVM();
1089 Isolate* isolate = CcTest::i_isolate();
1090 HandleScope scope(isolate);
1094 double cvt_small_out;
1095 uint32_t trunc_big_out;
1096 uint32_t trunc_small_out;
1097 uint32_t cvt_big_in;
1098 uint32_t cvt_small_in;
1102 MacroAssembler assm(isolate, NULL, 0);
1104 __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
1105 __ Cvt_d_uw(f10, a4, f22);
1106 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
1108 __ Trunc_uw_d(f10, f10, f22);
1109 __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
1111 __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
1112 __ Cvt_d_uw(f8, a4, f22);
1113 __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
1115 __ Trunc_uw_d(f8, f8, f22);
1116 __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
1122 assm.GetCode(&desc);
1123 Handle<Code> code = isolate->factory()->NewCode(
1124 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1125 F3 f = FUNCTION_CAST<F3>(code->entry());
1127 t.cvt_big_in = 0xFFFFFFFF;
1128 t.cvt_small_in = 333;
1130 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1133 CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
1134 CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
1136 CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
1137 CHECK_EQ(static_cast<int>(t.trunc_small_out),
1138 static_cast<int>(t.cvt_small_in));
1143 // Test round, floor, ceil, trunc, cvt.
1144 CcTest::InitializeVM();
1145 Isolate* isolate = CcTest::i_isolate();
1146 HandleScope scope(isolate);
1148 #define ROUND_STRUCT_ELEMENT(x) \
1149 int32_t x##_up_out; \
1150 int32_t x##_down_out; \
1151 int32_t neg_##x##_up_out; \
1152 int32_t neg_##x##_down_out; \
1153 uint32_t x##_err1_out; \
1154 uint32_t x##_err2_out; \
1155 uint32_t x##_err3_out; \
1156 uint32_t x##_err4_out; \
1157 int32_t x##_invalid_result;
1161 double round_down_in;
1162 double neg_round_up_in;
1163 double neg_round_down_in;
1169 ROUND_STRUCT_ELEMENT(round)
1170 ROUND_STRUCT_ELEMENT(floor)
1171 ROUND_STRUCT_ELEMENT(ceil)
1172 ROUND_STRUCT_ELEMENT(trunc)
1173 ROUND_STRUCT_ELEMENT(cvt)
1177 #undef ROUND_STRUCT_ELEMENT
1179 MacroAssembler assm(isolate, NULL, 0);
1183 // Disable FPU exceptions.
1184 __ ctc1(zero_reg, FCSR);
1185 #define RUN_ROUND_TEST(x) \
1186 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
1187 __ x##_w_d(f0, f0); \
1188 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
1190 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
1191 __ x##_w_d(f0, f0); \
1192 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
1194 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
1195 __ x##_w_d(f0, f0); \
1196 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
1198 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
1199 __ x##_w_d(f0, f0); \
1200 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
1202 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
1203 __ ctc1(zero_reg, FCSR); \
1204 __ x##_w_d(f0, f0); \
1205 __ cfc1(a2, FCSR); \
1206 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
1208 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
1209 __ ctc1(zero_reg, FCSR); \
1210 __ x##_w_d(f0, f0); \
1211 __ cfc1(a2, FCSR); \
1212 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
1214 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
1215 __ ctc1(zero_reg, FCSR); \
1216 __ x##_w_d(f0, f0); \
1217 __ cfc1(a2, FCSR); \
1218 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
1220 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
1221 __ ctc1(zero_reg, FCSR); \
1222 __ x##_w_d(f0, f0); \
1223 __ cfc1(a2, FCSR); \
1224 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
1225 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
1227 RUN_ROUND_TEST(round)
1228 RUN_ROUND_TEST(floor)
1229 RUN_ROUND_TEST(ceil)
1230 RUN_ROUND_TEST(trunc)
1240 assm.GetCode(&desc);
1241 Handle<Code> code = isolate->factory()->NewCode(
1242 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1243 F3 f = FUNCTION_CAST<F3>(code->entry());
1245 t.round_up_in = 123.51;
1246 t.round_down_in = 123.49;
1247 t.neg_round_up_in = -123.5;
1248 t.neg_round_down_in = -123.49;
1251 t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
1254 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1257 #define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
1258 #define CHECK_ROUND_RESULT(type) \
1259 CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
1260 CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
1261 CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
1262 CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
1263 CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);
1265 CHECK_ROUND_RESULT(round);
1266 CHECK_ROUND_RESULT(floor);
1267 CHECK_ROUND_RESULT(ceil);
1268 CHECK_ROUND_RESULT(cvt);
1273 // Test chaining of label usages within instructions (issue 1644).
1274 CcTest::InitializeVM();
1275 Isolate* isolate = CcTest::i_isolate();
1276 HandleScope scope(isolate);
1277 Assembler assm(isolate, NULL, 0);
1280 __ beq(v0, v1, &target);
1282 __ bne(v0, v1, &target);
1289 // ----- mips64 tests -----------------------------------------------
// Fragment of the 64-bit load/store test: each load result is stored into
// one of the sentinel fields r1..r6 so the CHECKs at the bottom can verify
// sign- versus zero-extension. The TEST(...) header, struct T, and the
// initialization of t.ui / t.si are on lines elided from this excerpt.
1292 // Test 64-bit memory loads and stores.
1293 CcTest::InitializeVM();
1294 Isolate* isolate = CcTest::i_isolate();
1295 HandleScope scope(isolate);
1309 Assembler assm(isolate, NULL, 0);
1312 // Basic 32-bit word load/store, with un-signed data.
1313 __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)));
1314 __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)));
1316 // Check that the data got zero-extended into 64-bit a4.
1317 __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)));
1319 // Basic 32-bit word load/store, with SIGNED data.
1320 __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)));
1321 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)));
1323 // Check that the data got sign-extended into 64-bit a5.
1324 __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)));
1326 // 32-bit UNSIGNED word load/store, with SIGNED data.
1327 __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)));
1328 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)));
1330 // Check that the data got zero-extended into 64-bit a6.
1331 __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)));
1333 // lh with positive data.
1334 __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)));
1335 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)));
1337 // lh with negative data.
1338 __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)));
1339 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)));
1341 // lhu with negative data.
1342 __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)));
1343 __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)));
1345 // lb with negative data.
1346 __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)));
1347 __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)));
1349 // sh writes only 1/2 of word.
// t1's upper bits are set up on an elided line; ori completes the pattern,
// sw fills r6 with it, then sh overwrites only the low halfword of r6 with
// the halfword loaded (zero-extended) from si.
1351 __ ori(t1, t1, 0x3333);
1352 __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)));
1353 __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)));
1354 __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)));
// Build the stub and call it with &t in a0.
1360 assm.GetCode(&desc);
1361 Handle<Code> code = isolate->factory()->NewCode(
1362 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1363 F3 f = FUNCTION_CAST<F3>(code->entry());
// Distinctive 64-bit sentinels so partial (32-bit) stores remain visible
// in the upper halves of the result fields.
1366 t.r1 = 0x1111111111111111;
1367 t.r2 = 0x2222222222222222;
1368 t.r3 = 0x3333333333333333;
1369 t.r4 = 0x4444444444444444;
1370 t.r5 = 0x5555555555555555;
1371 t.r6 = 0x6666666666666666;
1372 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
// NOTE(review): the expected values below imply t.ui == 0x44332211 and
// t.si negative with low halfword 0xbbcc, set on elided lines — confirm
// against the full file.
1375 // Unsigned data, 32 & 64.
1376 CHECK_EQ(static_cast<int64_t>(0x1111111144332211L), t.r1);
1377 CHECK_EQ(static_cast<int64_t>(0x0000000000002211L), t.r2);
1379 // Signed data, 32 & 64.
1380 CHECK_EQ(static_cast<int64_t>(0x33333333ffffbbccL), t.r3);
1381 CHECK_EQ(static_cast<int64_t>(0xffffffff0000bbccL), t.r4);
1383 // Signed data, 32 & 64.
1384 CHECK_EQ(static_cast<int64_t>(0x55555555ffffffccL), t.r5);
1385 CHECK_EQ(static_cast<int64_t>(0x000000003333bbccL), t.r6);
1389 TEST(jump_tables1) {
1390 // Test jump tables with forward jumps.
1391 CcTest::InitializeVM();
1392 Isolate* isolate = CcTest::i_isolate();
1393 HandleScope scope(isolate);
1394 Assembler assm(isolate, nullptr, 0);
// 512 random 32-bit case values and one label per case.
1396 const int kNumCases = 512;
1397 int values[kNumCases];
1398 isolate->random_number_generator()->NextBytes(values, sizeof(values));
1399 Label labels[kNumCases];
// Prologue: push ra — the dispatch sequence below clobbers it to obtain a
// pc-relative base (presumably via a bal/nal on an elided line — confirm).
1401 __ daddiu(sp, sp, -8);
1402 __ sd(ra, MemOperand(sp));
// Keep the 64-bit jump table 8-byte aligned; the padding emitted inside
// this branch is on lines elided from this excerpt.
1403 if ((assm.pc_offset() & 7) == 0) {
// The dispatch + table must have a fixed, predictable size: kNumCases
// 64-bit entries (2 instruction slots each) plus 7 dispatch instructions.
1409 PredictableCodeSizeScope predictable(
1410 &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
// at = base(ra) + scaled case index; load the target address stored
// 5 instructions past the base, then jump through it (index scaling and
// the jr are on elided lines).
1417 __ daddu(at, at, ra);
1418 __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
// Emit the table of 64-bit label addresses (forward references here).
1421 for (int i = 0; i < kNumCases; ++i) {
// One stub per case: materialize the 32-bit value in v0 via lui/ori, then
// branch to the common epilogue (branch on an elided line).
1426 for (int i = 0; i < kNumCases; ++i) {
1427 __ bind(&labels[i]);
1428 __ lui(v0, (values[i] >> 16) & 0xffff);
1429 __ ori(v0, v0, values[i] & 0xffff);
// Epilogue: restore ra and pop the stack slot.
1435 __ ld(ra, MemOperand(sp));
1436 __ daddiu(sp, sp, 8);
1441 assm.GetCode(&desc);
1442 Handle<Code> code = isolate->factory()->NewCode(
1443 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1445 code->Print(std::cout);
// Run every case and check the returned value matches the table entry.
1447 F1 f = FUNCTION_CAST<F1>(code->entry());
1448 for (int i = 0; i < kNumCases; ++i) {
1449 int res = reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
1450 ::printf("f(%d) = %d\n", i, res);
1451 CHECK_EQ(values[i], static_cast<int>(res));
1456 TEST(jump_tables2) {
1457 // Test jump tables with backward jumps.
1458 CcTest::InitializeVM();
1459 Isolate* isolate = CcTest::i_isolate();
1460 HandleScope scope(isolate);
1461 Assembler assm(isolate, nullptr, 0);
// Same setup as jump_tables1: 512 random case values, one label per case.
1463 const int kNumCases = 512;
1464 int values[kNumCases];
1465 isolate->random_number_generator()->NextBytes(values, sizeof(values));
1466 Label labels[kNumCases];
// Prologue: push ra (clobbered by the pc-relative dispatch sequence).
1468 __ daddiu(sp, sp, -8);
1469 __ sd(ra, MemOperand(sp));
1471 Label done, dispatch;
// Unlike jump_tables1, the case stubs are emitted FIRST, so the table
// entries below are backward references when the table is emitted.
1475 for (int i = 0; i < kNumCases; ++i) {
1476 __ bind(&labels[i]);
1477 __ lui(v0, (values[i] >> 16) & 0xffff);
1478 __ ori(v0, v0, values[i] & 0xffff);
// Keep the 64-bit jump table 8-byte aligned; the padding emitted inside
// this branch is on lines elided from this excerpt.
1483 if ((assm.pc_offset() & 7) == 0) {
// Fixed-size region: kNumCases 64-bit entries plus 7 dispatch instructions.
1488 PredictableCodeSizeScope predictable(
1489 &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
// at = base(ra) + scaled index; fetch the table entry 5 instructions past
// the base and jump through it (scaling and jr on elided lines).
1496 __ daddu(at, at, ra);
1497 __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
// Emit the table of (backward) 64-bit label addresses.
1500 for (int i = 0; i < kNumCases; ++i) {
// Epilogue: restore ra and pop the stack slot.
1506 __ ld(ra, MemOperand(sp));
1507 __ daddiu(sp, sp, 8);
1512 assm.GetCode(&desc);
1513 Handle<Code> code = isolate->factory()->NewCode(
1514 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1516 code->Print(std::cout);
// Run every case and check the returned value matches the table entry.
1518 F1 f = FUNCTION_CAST<F1>(code->entry());
1519 for (int i = 0; i < kNumCases; ++i) {
1520 int res = reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
1521 ::printf("f(%d) = %d\n", i, res);
1522 CHECK_EQ(values[i], res);
1527 TEST(jump_tables3) {
1528 // Test jump tables with backward jumps and embedded heap objects.
1529 CcTest::InitializeVM();
1530 Isolate* isolate = CcTest::i_isolate();
1531 HandleScope scope(isolate);
1532 Assembler assm(isolate, nullptr, 0);
// 512 heap numbers with random values; the generated code must return the
// embedded object pointer for the requested case index.
1534 const int kNumCases = 512;
1535 Handle<Object> values[kNumCases];
1536 for (int i = 0; i < kNumCases; ++i) {
1537 double value = isolate->random_number_generator()->NextDouble();
1538 values[i] = isolate->factory()->NewHeapNumber(value, IMMUTABLE, TENURED);
1540 Label labels[kNumCases];
// Prologue: push ra (clobbered by the pc-relative dispatch sequence).
1544 __ daddiu(sp, sp, -8);
1545 __ sd(ra, MemOperand(sp));
1547 Label done, dispatch;
// Case stubs first (backward-referenced from the table): materialize the
// 48 significant bits of the object pointer into v0 via lui/ori/dsll/ori.
1551 for (int i = 0; i < kNumCases; ++i) {
1552 __ bind(&labels[i]);
1554 imm64 = reinterpret_cast<intptr_t>(obj);
1555 __ lui(v0, (imm64 >> 32) & kImm16Mask);
1556 __ ori(v0, v0, (imm64 >> 16) & kImm16Mask);
1557 __ dsll(v0, v0, 16);
1558 __ ori(v0, v0, imm64 & kImm16Mask);
// Keep the 64-bit jump table 8-byte aligned; the padding emitted inside
// this branch is on lines elided from this excerpt.
1564 if ((assm.pc_offset() & 7) == 0) {
// Fixed-size region: kNumCases 64-bit entries plus 7 dispatch instructions.
1569 PredictableCodeSizeScope predictable(
1570 &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
// at = base(ra) + scaled index; fetch the table entry 5 instructions past
// the base and jump through it (scaling and jr on elided lines).
1577 __ daddu(at, at, ra);
1578 __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
// Emit the table of (backward) 64-bit label addresses.
1581 for (int i = 0; i < kNumCases; ++i) {
// Epilogue: restore ra and pop the stack slot. Use daddiu (64-bit add):
// the previous 32-bit addiu sign-extends its result and would corrupt sp
// for stack addresses outside the sign-extended 32-bit range; it also
// mismatched the daddiu(sp, sp, -8) prologue and jump_tables1/2.
1587 __ ld(ra, MemOperand(sp));
1588 __ daddiu(sp, sp, 8);
1593 assm.GetCode(&desc);
1594 Handle<Code> code = isolate->factory()->NewCode(
1595 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1597 // code->Print(std::cout);
// Run each case and check that the identical heap object comes back.
1599 F1 f = FUNCTION_CAST<F1>(code->entry());
1600 for (int i = 0; i < kNumCases; ++i) {
1601 Handle<Object> result(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0), isolate);
1603 ::printf("f(%d) = ", i);
1604 result->Print(std::cout);
1607 CHECK(values[i].is_identical_to(result));