1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "src/disassembler.h"
31 #include "src/factory.h"
32 #include "src/macro-assembler.h"
33 #include "src/mips64/macro-assembler-mips64.h"
34 #include "src/mips64/simulator-mips64.h"
36 #include "test/cctest/cctest.h"
38 using namespace v8::internal;
// Define these function prototypes to match JSEntryFunction in execution.cc.
// Each alias is a pointer to generated code taking five machine-word
// arguments and returning a raw Object* (the V8 JS-entry calling convention).
// Only the leading parameters are meaningful per test; the trailing pN slots
// are padding so every signature has the same arity.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);  // One int arg.
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);  // Two int args.
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);  // Struct ptr.
51 CcTest::InitializeVM();
52 Isolate* isolate = CcTest::i_isolate();
53 HandleScope scope(isolate);
55 MacroAssembler assm(isolate, NULL, 0);
64 Handle<Code> code = isolate->factory()->NewCode(
65 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
66 F2 f = FUNCTION_CAST<F2>(code->entry());
68 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
69 ::printf("f() = %ld\n", res);
70 CHECK_EQ(0xabcL, res);
75 CcTest::InitializeVM();
76 Isolate* isolate = CcTest::i_isolate();
77 HandleScope scope(isolate);
79 MacroAssembler assm(isolate, NULL, 0);
93 __ Branch(&L, ne, v1, Operand((int64_t)0));
101 Handle<Code> code = isolate->factory()->NewCode(
102 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
103 F1 f = FUNCTION_CAST<F1>(code->entry());
105 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
106 ::printf("f() = %ld\n", res);
107 CHECK_EQ(1275L, res);
112 CcTest::InitializeVM();
113 Isolate* isolate = CcTest::i_isolate();
114 HandleScope scope(isolate);
116 MacroAssembler assm(isolate, NULL, 0);
120 // ----- Test all instructions.
122 // Test lui, ori, and addiu, used in the li pseudo-instruction.
123 // This way we can then safely load registers with chosen values.
125 __ ori(a4, zero_reg, 0);
128 __ ori(a4, a4, 0x0f0f);
129 __ ori(a4, a4, 0xf0f0);
131 __ addiu(a6, a5, -0x10);
133 // Load values in temporary registers.
134 __ li(a4, 0x00000004);
135 __ li(a5, 0x00001234);
136 __ li(a6, 0x12345678);
137 __ li(a7, 0x7fffffff);
138 __ li(t0, 0xfffffffc);
139 __ li(t1, 0xffffedcc);
140 __ li(t2, 0xedcba988);
141 __ li(t3, 0x80000000);
144 __ srl(v0, a6, 8); // 0x00123456
145 __ sll(v0, v0, 11); // 0x91a2b000
146 __ sra(v0, v0, 3); // 0xf2345600
147 __ srav(v0, v0, a4); // 0xff234560
148 __ sllv(v0, v0, a4); // 0xf2345600
149 __ srlv(v0, v0, a4); // 0x0f234560
150 __ Branch(&error, ne, v0, Operand(0x0f234560));
153 __ addu(v0, a4, a5); // 0x00001238
154 __ subu(v0, v0, a4); // 0x00001234
155 __ Branch(&error, ne, v0, Operand(0x00001234));
157 __ addu(v1, a7, a4); // 32bit addu result is sign-extended into 64bit reg.
158 __ Branch(&error, ne, v1, Operand(0xffffffff80000003));
160 __ subu(v1, t3, a4); // 0x7ffffffc
161 __ Branch(&error, ne, v1, Operand(0x7ffffffc));
164 __ and_(v0, a5, a6); // 0x0000000000001230
165 __ or_(v0, v0, a5); // 0x0000000000001234
166 __ xor_(v0, v0, a6); // 0x000000001234444c
167 __ nor(v0, v0, a6); // 0xffffffffedcba987
168 __ Branch(&error, ne, v0, Operand(0xffffffffedcba983));
171 // Shift both 32bit number to left, to preserve meaning of next comparison.
172 __ dsll32(a7, a7, 0);
173 __ dsll32(t3, t3, 0);
176 __ Branch(&error, ne, v0, Operand(0x1));
179 __ Branch(&error, ne, v0, Operand(zero_reg));
182 // Restore original values in registers.
183 __ dsrl32(a7, a7, 0);
184 __ dsrl32(t3, t3, 0);
185 // End of SPECIAL class.
187 __ addiu(v0, zero_reg, 0x7421); // 0x00007421
188 __ addiu(v0, v0, -0x1); // 0x00007420
189 __ addiu(v0, v0, -0x20); // 0x00007400
190 __ Branch(&error, ne, v0, Operand(0x00007400));
192 __ addiu(v1, a7, 0x1); // 0x80000000 - result is sign-extended.
193 __ Branch(&error, ne, v1, Operand(0xffffffff80000000));
196 __ slti(v0, a5, 0x00002000); // 0x1
197 __ slti(v0, v0, 0xffff8000); // 0x0
198 __ Branch(&error, ne, v0, Operand(zero_reg));
200 __ sltiu(v0, a5, 0x00002000); // 0x1
201 __ sltiu(v0, v0, 0x00008000); // 0x1
202 __ Branch(&error, ne, v0, Operand(0x1));
205 __ andi(v0, a5, 0xf0f0); // 0x00001030
206 __ ori(v0, v0, 0x8a00); // 0x00009a30
207 __ xori(v0, v0, 0x83cc); // 0x000019fc
208 __ Branch(&error, ne, v0, Operand(0x000019fc));
210 __ lui(v1, 0x8123); // Result is sign-extended into 64bit register.
211 __ Branch(&error, ne, v1, Operand(0xffffffff81230000));
214 // Bit twiddling instructions & conditional moves.
215 // Uses a4-t3 as set above.
216 __ Clz(v0, a4); // 29
217 __ Clz(v1, a5); // 19
218 __ addu(v0, v0, v1); // 48
220 __ addu(v0, v0, v1); // 51
222 __ addu(v0, v0, v1); // 51
223 __ Branch(&error, ne, v0, Operand(51));
224 __ Movn(a0, a7, a4); // Move a0<-a7 (a4 is NOT 0).
225 __ Ins(a0, a5, 12, 8); // 0x7ff34fff
226 __ Branch(&error, ne, a0, Operand(0x7ff34fff));
227 __ Movz(a0, t2, t3); // a0 not updated (t3 is NOT 0).
228 __ Ext(a1, a0, 8, 12); // 0x34f
229 __ Branch(&error, ne, a1, Operand(0x34f));
230 __ Movz(a0, t2, v1); // a0<-t2, v0 is 0, from 8 instr back.
231 __ Branch(&error, ne, a0, Operand(t2));
233 // Everything was correctly executed. Load the expected result.
234 __ li(v0, 0x31415926);
239 // Got an error. Return a wrong result.
248 Handle<Code> code = isolate->factory()->NewCode(
249 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
250 F2 f = FUNCTION_CAST<F2>(code->entry());
252 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
253 ::printf("f() = %ld\n", res);
255 CHECK_EQ(0x31415926L, res);
260 // Test floating point instructions.
261 CcTest::InitializeVM();
262 Isolate* isolate = CcTest::i_isolate();
263 HandleScope scope(isolate);
278 // Create a function that accepts &t, and loads, manipulates, and stores
279 // the doubles t.a ... t.f.
280 MacroAssembler assm(isolate, NULL, 0);
283 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
284 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
285 __ add_d(f8, f4, f6);
286 __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
288 __ mov_d(f10, f8); // c
289 __ neg_d(f12, f6); // -b
290 __ sub_d(f10, f10, f12);
291 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
293 __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
297 __ cvt_d_w(f14, f14); // f14 = 120.0.
298 __ mul_d(f10, f10, f14);
299 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
301 __ div_d(f12, f10, f4);
302 __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
305 __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
306 // g = sqrt(f) = 10.97451593465515908537
308 if (kArchVariant == kMips64r2) {
309 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
310 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
311 __ madd_d(f14, f6, f4, f6);
312 __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
320 Handle<Code> code = isolate->factory()->NewCode(
321 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
322 F3 f = FUNCTION_CAST<F3>(code->entry());
331 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
333 CHECK_EQ(1.5e14, t.a);
334 CHECK_EQ(1.5e14, t.b);
335 CHECK_EQ(1.50275e14, t.c);
336 CHECK_EQ(1.50550e14, t.d);
337 CHECK_EQ(1.8066e16, t.e);
338 CHECK_EQ(120.44, t.f);
339 CHECK_EQ(10.97451593465515908537, t.g);
340 if (kArchVariant == kMips64r2) {
341 CHECK_EQ(6.875, t.h);
347 // Test moves between floating point and integer registers.
348 CcTest::InitializeVM();
349 Isolate* isolate = CcTest::i_isolate();
350 HandleScope scope(isolate);
359 Assembler assm(isolate, NULL, 0);
362 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
363 __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)) );
365 // Swap f4 and f5, by using 3 integer registers, a4-a6,
366 // using two 32-bit chunks and one 64-bit chunk.
367 // mXhc1 is mips32/64-r2 only, not r1,
368 // but we will not support r1 in practice.
377 // Store the swapped f4 and f5 back to memory.
378 __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
379 __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)) );
386 Handle<Code> code = isolate->factory()->NewCode(
387 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
388 F3 f = FUNCTION_CAST<F3>(code->entry());
392 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
395 CHECK_EQ(2.75e11, t.a);
396 CHECK_EQ(2.75e11, t.b);
397 CHECK_EQ(1.5e22, t.c);
402 // Test conversions between doubles and integers.
403 CcTest::InitializeVM();
404 Isolate* isolate = CcTest::i_isolate();
405 HandleScope scope(isolate);
415 Assembler assm(isolate, NULL, 0);
418 // Load all structure elements to registers.
419 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
420 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
421 __ lw(a4, MemOperand(a0, OFFSET_OF(T, i)) );
422 __ lw(a5, MemOperand(a0, OFFSET_OF(T, j)) );
424 // Convert double in f4 to int in element i.
427 __ sw(a6, MemOperand(a0, OFFSET_OF(T, i)) );
429 // Convert double in f6 to int in element j.
432 __ sw(a7, MemOperand(a0, OFFSET_OF(T, j)) );
434 // Convert int in original i (a4) to double in a.
437 __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
439 // Convert int in original j (a5) to double in b.
442 __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
449 Handle<Code> code = isolate->factory()->NewCode(
450 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
451 F3 f = FUNCTION_CAST<F3>(code->entry());
456 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
459 CHECK_EQ(12345678.0, t.a);
460 CHECK_EQ(-100000.0, t.b);
461 CHECK_EQ(15000, t.i);
462 CHECK_EQ(275000000, t.j);
467 // Test simple memory loads and stores.
468 CcTest::InitializeVM();
469 Isolate* isolate = CcTest::i_isolate();
470 HandleScope scope(isolate);
484 Assembler assm(isolate, NULL, 0);
487 // Basic word load/store.
488 __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
489 __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
491 // lh with positive data.
492 __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
493 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
495 // lh with negative data.
496 __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
497 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
499 // lhu with negative data.
500 __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
501 __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
503 // lb with negative data.
504 __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
505 __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
507 // sh writes only 1/2 of word.
509 __ ori(t1, t1, 0x3333);
510 __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
511 __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
512 __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
519 Handle<Code> code = isolate->factory()->NewCode(
520 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
521 F3 f = FUNCTION_CAST<F3>(code->entry());
524 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
527 CHECK_EQ(0x11223344, t.r1);
528 CHECK_EQ(0x3344, t.r2);
529 CHECK_EQ(0xffffbbcc, t.r3);
530 CHECK_EQ(0x0000bbcc, t.r4);
531 CHECK_EQ(0xffffffcc, t.r5);
532 CHECK_EQ(0x3333bbcc, t.r6);
537 // Test floating point compare and branch instructions.
538 CcTest::InitializeVM();
539 Isolate* isolate = CcTest::i_isolate();
540 HandleScope scope(isolate);
553 // Create a function that accepts &t, and loads, manipulates, and stores
554 // the doubles t.a ... t.f.
555 MacroAssembler assm(isolate, NULL, 0);
556 Label neither_is_nan, less_than, outa_here;
558 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
559 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
560 if (kArchVariant != kMips64r6) {
562 __ bc1f(&neither_is_nan);
564 __ cmp(UN, L, f2, f4, f6);
565 __ bc1eqz(&neither_is_nan, f2);
568 __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
569 __ Branch(&outa_here);
571 __ bind(&neither_is_nan);
573 if (kArchVariant == kMips64r6) {
574 __ cmp(OLT, L, f2, f6, f4);
575 __ bc1nez(&less_than, f2);
577 __ c(OLT, D, f6, f4, 2);
578 __ bc1t(&less_than, 2);
582 __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
583 __ Branch(&outa_here);
586 __ Addu(a4, zero_reg, Operand(1));
587 __ sw(a4, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
590 // This test-case should have additional tests.
599 Handle<Code> code = isolate->factory()->NewCode(
600 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
601 F3 f = FUNCTION_CAST<F3>(code->entry());
609 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
611 CHECK_EQ(1.5e14, t.a);
612 CHECK_EQ(2.75e11, t.b);
613 CHECK_EQ(1, t.result);
618 // Test ROTR and ROTRV instructions.
619 CcTest::InitializeVM();
620 Isolate* isolate = CcTest::i_isolate();
621 HandleScope scope(isolate);
625 int32_t result_rotr_4;
626 int32_t result_rotr_8;
627 int32_t result_rotr_12;
628 int32_t result_rotr_16;
629 int32_t result_rotr_20;
630 int32_t result_rotr_24;
631 int32_t result_rotr_28;
632 int32_t result_rotrv_4;
633 int32_t result_rotrv_8;
634 int32_t result_rotrv_12;
635 int32_t result_rotrv_16;
636 int32_t result_rotrv_20;
637 int32_t result_rotrv_24;
638 int32_t result_rotrv_28;
642 MacroAssembler assm(isolate, NULL, 0);
645 __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );
647 // ROTR instruction (called through the Ror macro).
648 __ Ror(a5, a4, 0x0004);
649 __ Ror(a6, a4, 0x0008);
650 __ Ror(a7, a4, 0x000c);
651 __ Ror(t0, a4, 0x0010);
652 __ Ror(t1, a4, 0x0014);
653 __ Ror(t2, a4, 0x0018);
654 __ Ror(t3, a4, 0x001c);
657 __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
658 __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
659 __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
660 __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
661 __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
662 __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
663 __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
665 // ROTRV instruction (called through the Ror macro).
682 __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
683 __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
684 __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
685 __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
686 __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
687 __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
688 __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
695 Handle<Code> code = isolate->factory()->NewCode(
696 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
697 F3 f = FUNCTION_CAST<F3>(code->entry());
698 t.input = 0x12345678;
699 Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
701 CHECK_EQ(0x81234567, t.result_rotr_4);
702 CHECK_EQ(0x78123456, t.result_rotr_8);
703 CHECK_EQ(0x67812345, t.result_rotr_12);
704 CHECK_EQ(0x56781234, t.result_rotr_16);
705 CHECK_EQ(0x45678123, t.result_rotr_20);
706 CHECK_EQ(0x34567812, t.result_rotr_24);
707 CHECK_EQ(0x23456781, t.result_rotr_28);
709 CHECK_EQ(0x81234567, t.result_rotrv_4);
710 CHECK_EQ(0x78123456, t.result_rotrv_8);
711 CHECK_EQ(0x67812345, t.result_rotrv_12);
712 CHECK_EQ(0x56781234, t.result_rotrv_16);
713 CHECK_EQ(0x45678123, t.result_rotrv_20);
714 CHECK_EQ(0x34567812, t.result_rotrv_24);
715 CHECK_EQ(0x23456781, t.result_rotrv_28);
720 // Test BRANCH improvements.
721 CcTest::InitializeVM();
722 Isolate* isolate = CcTest::i_isolate();
723 HandleScope scope(isolate);
725 MacroAssembler assm(isolate, NULL, 0);
726 Label exit, exit2, exit3;
728 __ Branch(&exit, ge, a0, Operand(zero_reg));
729 __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
730 __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
740 isolate->factory()->NewCode(
741 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
746 // Test conversions between doubles and long integers.
747 // Test how the long ints map to FP reg pairs.
748 CcTest::InitializeVM();
749 Isolate* isolate = CcTest::i_isolate();
750 HandleScope scope(isolate);
760 int64_t long_as_int64;
763 int64_t b_long_as_int64;
767 Assembler assm(isolate, NULL, 0);
770 if (kArchVariant == kMips64r2) {
771 // Rewritten for FR=1 FPU mode:
772 // - 32 FP regs of 64-bits each, no odd/even pairs.
773 // - Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode.
774 // Load all structure elements to registers.
775 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
777 // Save the raw bits of the double.
780 __ sw(a4, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
781 __ sw(a5, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
783 // Convert double in f0 to long, save hi/lo parts.
785 __ mfc1(a4, f0); // f0 LS 32 bits of long.
786 __ mfhc1(a5, f0); // f0 MS 32 bits of long.
787 __ sw(a4, MemOperand(a0, OFFSET_OF(T, long_lo)));
788 __ sw(a5, MemOperand(a0, OFFSET_OF(T, long_hi)));
790 // Combine the high/low ints, convert back to double.
791 __ dsll32(a6, a5, 0); // Move a5 to high bits of a6.
795 __ sdc1(f1, MemOperand(a0, OFFSET_OF(T, a_converted)));
798 // Convert the b long integers to double b.
799 __ lw(a4, MemOperand(a0, OFFSET_OF(T, b_long_lo)));
800 __ lw(a5, MemOperand(a0, OFFSET_OF(T, b_long_hi)));
801 __ mtc1(a4, f8); // f8 LS 32-bits.
802 __ mthc1(a5, f8); // f8 MS 32-bits.
804 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
806 // Convert double b back to long-int.
807 __ ldc1(f31, MemOperand(a0, OFFSET_OF(T, b)));
808 __ cvt_l_d(f31, f31);
810 __ sd(a7, MemOperand(a0, OFFSET_OF(T, b_long_as_int64)));
818 Handle<Code> code = isolate->factory()->NewCode(
819 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
820 F3 f = FUNCTION_CAST<F3>(code->entry());
821 t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
822 t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
823 t.b_long_lo = 0x00ff00ff;
824 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
827 CHECK_EQ(0x41DFFFFF, t.dbl_exp);
828 CHECK_EQ(0xFFC00000, t.dbl_mant);
829 CHECK_EQ(0, t.long_hi);
830 CHECK_EQ(0x7fffffff, t.long_lo);
831 CHECK_EQ(2.147483647e9, t.a_converted);
833 // 0xFF00FF00FF -> 1.095233372415e12.
834 CHECK_EQ(1.095233372415e12, t.b);
835 CHECK_EQ(0xFF00FF00FF, t.b_long_as_int64);
841 // Do not run test on MIPS64r6, as these instructions are removed.
842 if (kArchVariant != kMips64r6) {
843 // Test LWL, LWR, SWL and SWR instructions.
844 CcTest::InitializeVM();
845 Isolate* isolate = CcTest::i_isolate();
846 HandleScope scope(isolate);
870 Assembler assm(isolate, NULL, 0);
872 // Test all combinations of LWL and vAddr.
873 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
874 __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
875 __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) );
877 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
878 __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
879 __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) );
881 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
882 __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
883 __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) );
885 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
886 __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
887 __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) );
889 // Test all combinations of LWR and vAddr.
890 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
891 __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
892 __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) );
894 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
895 __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
896 __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) );
898 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
899 __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
900 __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
902 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
903 __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
904 __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
906 // Test all combinations of SWL and vAddr.
907 __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
908 __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
909 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
910 __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
912 __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
913 __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) );
914 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
915 __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) );
917 __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
918 __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) );
919 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
920 __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) );
922 __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
923 __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) );
924 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
925 __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) );
927 // Test all combinations of SWR and vAddr.
928 __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
929 __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
930 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
931 __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
933 __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
934 __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) );
935 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
936 __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) );
938 __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
939 __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) );
940 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
941 __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) );
943 __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
944 __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) );
945 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
946 __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) );
953 Handle<Code> code = isolate->factory()->NewCode(
954 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
955 F3 f = FUNCTION_CAST<F3>(code->entry());
956 t.reg_init = 0xaabbccdd;
957 t.mem_init = 0x11223344;
959 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
962 CHECK_EQ(0x44bbccdd, t.lwl_0);
963 CHECK_EQ(0x3344ccdd, t.lwl_1);
964 CHECK_EQ(0x223344dd, t.lwl_2);
965 CHECK_EQ(0x11223344, t.lwl_3);
967 CHECK_EQ(0x11223344, t.lwr_0);
968 CHECK_EQ(0xaa112233, t.lwr_1);
969 CHECK_EQ(0xaabb1122, t.lwr_2);
970 CHECK_EQ(0xaabbcc11, t.lwr_3);
972 CHECK_EQ(0x112233aa, t.swl_0);
973 CHECK_EQ(0x1122aabb, t.swl_1);
974 CHECK_EQ(0x11aabbcc, t.swl_2);
975 CHECK_EQ(0xaabbccdd, t.swl_3);
977 CHECK_EQ(0xaabbccdd, t.swr_0);
978 CHECK_EQ(0xbbccdd44, t.swr_1);
979 CHECK_EQ(0xccdd3344, t.swr_2);
980 CHECK_EQ(0xdd223344, t.swr_3);
986 CcTest::InitializeVM();
987 Isolate* isolate = CcTest::i_isolate();
988 HandleScope scope(isolate);
1000 MacroAssembler assm(isolate, NULL, 0);
1002 __ mov(t2, fp); // Save frame pointer.
1003 __ mov(fp, a0); // Access struct T by fp.
1004 __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)) );
1005 __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)) );
1007 __ addu(a5, a4, a7);
1008 __ subu(t0, a4, a7);
1010 __ push(a4); // These instructions disappear after opt.
1012 __ addu(a4, a4, a4);
1014 __ Pop(); // These instructions disappear after opt.
1017 __ push(a7); // These instructions disappear after opt.
1023 __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
1024 __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
1026 __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)) );
1027 __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)) );
1030 __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)) );
1034 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
1038 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
1042 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
1046 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)) );
1055 assm.GetCode(&desc);
1056 Handle<Code> code = isolate->factory()->NewCode(
1057 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1058 F3 f = FUNCTION_CAST<F3>(code->entry());
1066 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1074 // Test Cvt_d_uw and Trunc_uw_d macros.
1075 CcTest::InitializeVM();
1076 Isolate* isolate = CcTest::i_isolate();
1077 HandleScope scope(isolate);
1081 double cvt_small_out;
1082 uint32_t trunc_big_out;
1083 uint32_t trunc_small_out;
1084 uint32_t cvt_big_in;
1085 uint32_t cvt_small_in;
1089 MacroAssembler assm(isolate, NULL, 0);
1091 __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
1092 __ Cvt_d_uw(f10, a4, f22);
1093 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
1095 __ Trunc_uw_d(f10, f10, f22);
1096 __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
1098 __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
1099 __ Cvt_d_uw(f8, a4, f22);
1100 __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
1102 __ Trunc_uw_d(f8, f8, f22);
1103 __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
1109 assm.GetCode(&desc);
1110 Handle<Code> code = isolate->factory()->NewCode(
1111 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1112 F3 f = FUNCTION_CAST<F3>(code->entry());
1114 t.cvt_big_in = 0xFFFFFFFF;
1115 t.cvt_small_in = 333;
1117 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1120 CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
1121 CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
1123 CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
1124 CHECK_EQ(static_cast<int>(t.trunc_small_out),
1125 static_cast<int>(t.cvt_small_in));
1130 // Test round, floor, ceil, trunc, cvt.
1131 CcTest::InitializeVM();
1132 Isolate* isolate = CcTest::i_isolate();
1133 HandleScope scope(isolate);
1135 #define ROUND_STRUCT_ELEMENT(x) \
1136 int32_t x##_up_out; \
1137 int32_t x##_down_out; \
1138 int32_t neg_##x##_up_out; \
1139 int32_t neg_##x##_down_out; \
1140 uint32_t x##_err1_out; \
1141 uint32_t x##_err2_out; \
1142 uint32_t x##_err3_out; \
1143 uint32_t x##_err4_out; \
1144 int32_t x##_invalid_result;
1148 double round_down_in;
1149 double neg_round_up_in;
1150 double neg_round_down_in;
1156 ROUND_STRUCT_ELEMENT(round)
1157 ROUND_STRUCT_ELEMENT(floor)
1158 ROUND_STRUCT_ELEMENT(ceil)
1159 ROUND_STRUCT_ELEMENT(trunc)
1160 ROUND_STRUCT_ELEMENT(cvt)
1164 #undef ROUND_STRUCT_ELEMENT
1166 MacroAssembler assm(isolate, NULL, 0);
1170 // Disable FPU exceptions.
1171 __ ctc1(zero_reg, FCSR);
1172 #define RUN_ROUND_TEST(x) \
1173 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
1174 __ x##_w_d(f0, f0); \
1175 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
1177 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
1178 __ x##_w_d(f0, f0); \
1179 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
1181 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
1182 __ x##_w_d(f0, f0); \
1183 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
1185 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
1186 __ x##_w_d(f0, f0); \
1187 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
1189 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
1190 __ ctc1(zero_reg, FCSR); \
1191 __ x##_w_d(f0, f0); \
1192 __ cfc1(a2, FCSR); \
1193 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
1195 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
1196 __ ctc1(zero_reg, FCSR); \
1197 __ x##_w_d(f0, f0); \
1198 __ cfc1(a2, FCSR); \
1199 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
1201 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
1202 __ ctc1(zero_reg, FCSR); \
1203 __ x##_w_d(f0, f0); \
1204 __ cfc1(a2, FCSR); \
1205 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
1207 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
1208 __ ctc1(zero_reg, FCSR); \
1209 __ x##_w_d(f0, f0); \
1210 __ cfc1(a2, FCSR); \
1211 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
1212 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
1214 RUN_ROUND_TEST(round)
1215 RUN_ROUND_TEST(floor)
1216 RUN_ROUND_TEST(ceil)
1217 RUN_ROUND_TEST(trunc)
1227 assm.GetCode(&desc);
1228 Handle<Code> code = isolate->factory()->NewCode(
1229 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1230 F3 f = FUNCTION_CAST<F3>(code->entry());
1232 t.round_up_in = 123.51;
1233 t.round_down_in = 123.49;
1234 t.neg_round_up_in = -123.5;
1235 t.neg_round_down_in = -123.49;
1238 t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
1241 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1244 #define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
1245 #define CHECK_ROUND_RESULT(type) \
1246 CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
1247 CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
1248 CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
1249 CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
1250 CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);
1252 CHECK_ROUND_RESULT(round);
1253 CHECK_ROUND_RESULT(floor);
1254 CHECK_ROUND_RESULT(ceil);
1255 CHECK_ROUND_RESULT(cvt);
1260 // Test chaining of label usages within instructions (issue 1644).
1261 CcTest::InitializeVM();
1262 Isolate* isolate = CcTest::i_isolate();
1263 HandleScope scope(isolate);
1264 Assembler assm(isolate, NULL, 0);
1267 __ beq(v0, v1, &target);
1269 __ bne(v0, v1, &target);
1276 // ----- mips64 tests -----------------------------------------------
1279 // Test 64-bit memory loads and stores.
1280 CcTest::InitializeVM();
1281 Isolate* isolate = CcTest::i_isolate();
1282 HandleScope scope(isolate);
1296 Assembler assm(isolate, NULL, 0);
1299 // Basic 32-bit word load/store, with un-signed data.
1300 __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
1301 __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
1303 // Check that the data got zero-extended into 64-bit a4.
1304 __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)) );
1306 // Basic 32-bit word load/store, with SIGNED data.
1307 __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)) );
1308 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)) );
1310 // Check that the data got sign-extended into 64-bit a4.
1311 __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)) );
1313 // 32-bit UNSIGNED word load/store, with SIGNED data.
1314 __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)) );
1315 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)) );
1317 // Check that the data got zero-extended into 64-bit a4.
1318 __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)) );
1320 // lh with positive data.
1321 __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
1322 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
1324 // lh with negative data.
1325 __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
1326 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
1328 // lhu with negative data.
1329 __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
1330 __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
1332 // lb with negative data.
1333 __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
1334 __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
1336 // sh writes only 1/2 of word.
1338 __ ori(t1, t1, 0x3333);
1339 __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
1340 __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
1341 __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
1347 assm.GetCode(&desc);
1348 Handle<Code> code = isolate->factory()->NewCode(
1349 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1350 F3 f = FUNCTION_CAST<F3>(code->entry());
1353 t.r1 = 0x1111111111111111;
1354 t.r2 = 0x2222222222222222;
1355 t.r3 = 0x3333333333333333;
1356 t.r4 = 0x4444444444444444;
1357 t.r5 = 0x5555555555555555;
1358 t.r6 = 0x6666666666666666;
1359 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1362 // Unsigned data, 32 & 64.
1363 CHECK_EQ(0x1111111144332211L, t.r1);
1364 CHECK_EQ(0x0000000000002211L, t.r2);
1366 // Signed data, 32 & 64.
1367 CHECK_EQ(0x33333333ffffbbccL, t.r3);
1368 CHECK_EQ(0xffffffff0000bbccL, t.r4);
1370 // Signed data, 32 & 64.
1371 CHECK_EQ(0x55555555ffffffccL, t.r5);
1372 CHECK_EQ(0x000000003333bbccL, t.r6);