1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "src/disassembler.h"
31 #include "src/factory.h"
32 #include "src/macro-assembler.h"
33 #include "src/mips64/macro-assembler-mips64.h"
34 #include "src/mips64/simulator-mips64.h"
36 #include "test/cctest/cctest.h"
38 using namespace v8::internal;
41 // Define these function prototypes to match JSEntryFunction in execution.cc.
// Generated code is entered via FUNCTION_CAST to one of these five-argument
// signatures; tests that need fewer inputs pass zeros for the unused
// p1..p4 slots (see the CALL_GENERATED_CODE call sites below).
// F1: one integer argument. F2: two integer arguments. F3: pointer argument
// (used to pass the address of a per-test data struct T).
42 typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
43 typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
44 typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
51 CcTest::InitializeVM();
52 Isolate* isolate = CcTest::i_isolate();
53 HandleScope scope(isolate);
55 MacroAssembler assm(isolate, NULL, 0);
64 Handle<Code> code = isolate->factory()->NewCode(
65 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
66 F2 f = FUNCTION_CAST<F2>(code->entry());
68 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
69 ::printf("f() = %ld\n", res);
70 CHECK_EQ(0xabcL, res);
75 CcTest::InitializeVM();
76 Isolate* isolate = CcTest::i_isolate();
77 HandleScope scope(isolate);
79 MacroAssembler assm(isolate, NULL, 0);
93 __ Branch(&L, ne, v1, Operand((int64_t)0));
101 Handle<Code> code = isolate->factory()->NewCode(
102 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
103 F1 f = FUNCTION_CAST<F1>(code->entry());
105 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
106 ::printf("f() = %ld\n", res);
107 CHECK_EQ(1275L, res);
112 CcTest::InitializeVM();
113 Isolate* isolate = CcTest::i_isolate();
114 HandleScope scope(isolate);
116 MacroAssembler assm(isolate, NULL, 0);
120 // ----- Test all instructions.
122 // Test lui, ori, and addiu, used in the li pseudo-instruction.
123 // This way we can then safely load registers with chosen values.
125 __ ori(a4, zero_reg, 0);
128 __ ori(a4, a4, 0x0f0f);
129 __ ori(a4, a4, 0xf0f0);
131 __ addiu(a6, a5, -0x10);
133 // Load values in temporary registers.
134 __ li(a4, 0x00000004);
135 __ li(a5, 0x00001234);
136 __ li(a6, 0x12345678);
137 __ li(a7, 0x7fffffff);
138 __ li(t0, 0xfffffffc);
139 __ li(t1, 0xffffedcc);
140 __ li(t2, 0xedcba988);
141 __ li(t3, 0x80000000);
144 __ srl(v0, a6, 8); // 0x00123456
145 __ sll(v0, v0, 11); // 0x91a2b000
146 __ sra(v0, v0, 3); // 0xf2345600
147 __ srav(v0, v0, a4); // 0xff234560
148 __ sllv(v0, v0, a4); // 0xf2345600
149 __ srlv(v0, v0, a4); // 0x0f234560
150 __ Branch(&error, ne, v0, Operand(0x0f234560));
153 __ addu(v0, a4, a5); // 0x00001238
154 __ subu(v0, v0, a4); // 0x00001234
155 __ Branch(&error, ne, v0, Operand(0x00001234));
157 __ addu(v1, a7, a4); // 32bit addu result is sign-extended into 64bit reg.
158 __ Branch(&error, ne, v1, Operand(0xffffffff80000003));
160 __ subu(v1, t3, a4); // 0x7ffffffc
161 __ Branch(&error, ne, v1, Operand(0x7ffffffc));
164 __ and_(v0, a5, a6); // 0x0000000000001230
165 __ or_(v0, v0, a5); // 0x0000000000001234
166 __ xor_(v0, v0, a6); // 0x000000001234444c
167 __ nor(v0, v0, a6); // 0xffffffffedcba987
168 __ Branch(&error, ne, v0, Operand(0xffffffffedcba983));
171 // Shift both 32bit number to left, to preserve meaning of next comparison.
172 __ dsll32(a7, a7, 0);
173 __ dsll32(t3, t3, 0);
176 __ Branch(&error, ne, v0, Operand(0x1));
179 __ Branch(&error, ne, v0, Operand(zero_reg));
182 // Restore original values in registers.
183 __ dsrl32(a7, a7, 0);
184 __ dsrl32(t3, t3, 0);
185 // End of SPECIAL class.
187 __ addiu(v0, zero_reg, 0x7421); // 0x00007421
188 __ addiu(v0, v0, -0x1); // 0x00007420
189 __ addiu(v0, v0, -0x20); // 0x00007400
190 __ Branch(&error, ne, v0, Operand(0x00007400));
192 __ addiu(v1, a7, 0x1); // 0x80000000 - result is sign-extended.
193 __ Branch(&error, ne, v1, Operand(0xffffffff80000000));
196 __ slti(v0, a5, 0x00002000); // 0x1
197 __ slti(v0, v0, 0xffff8000); // 0x0
198 __ Branch(&error, ne, v0, Operand(zero_reg));
200 __ sltiu(v0, a5, 0x00002000); // 0x1
201 __ sltiu(v0, v0, 0x00008000); // 0x1
202 __ Branch(&error, ne, v0, Operand(0x1));
205 __ andi(v0, a5, 0xf0f0); // 0x00001030
206 __ ori(v0, v0, 0x8a00); // 0x00009a30
207 __ xori(v0, v0, 0x83cc); // 0x000019fc
208 __ Branch(&error, ne, v0, Operand(0x000019fc));
210 __ lui(v1, 0x8123); // Result is sign-extended into 64bit register.
211 __ Branch(&error, ne, v1, Operand(0xffffffff81230000));
214 // Bit twiddling instructions & conditional moves.
215 // Uses a4-t3 as set above.
216 __ Clz(v0, a4); // 29
217 __ Clz(v1, a5); // 19
218 __ addu(v0, v0, v1); // 48
220 __ addu(v0, v0, v1); // 51
222 __ addu(v0, v0, v1); // 51
223 __ Branch(&error, ne, v0, Operand(51));
224 __ Movn(a0, a7, a4); // Move a0<-a7 (a4 is NOT 0).
225 __ Ins(a0, a5, 12, 8); // 0x7ff34fff
226 __ Branch(&error, ne, a0, Operand(0x7ff34fff));
227 __ Movz(a0, t2, t3); // a0 not updated (t3 is NOT 0).
228 __ Ext(a1, a0, 8, 12); // 0x34f
229 __ Branch(&error, ne, a1, Operand(0x34f));
230 __ Movz(a0, t2, v1); // a0<-t2, v0 is 0, from 8 instr back.
231 __ Branch(&error, ne, a0, Operand(t2));
233 // Everything was correctly executed. Load the expected result.
234 __ li(v0, 0x31415926);
239 // Got an error. Return a wrong result.
248 Handle<Code> code = isolate->factory()->NewCode(
249 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
250 F2 f = FUNCTION_CAST<F2>(code->entry());
252 reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
253 ::printf("f() = %ld\n", res);
255 CHECK_EQ(0x31415926L, res);
260 // Test floating point instructions.
261 CcTest::InitializeVM();
262 Isolate* isolate = CcTest::i_isolate();
263 HandleScope scope(isolate);
278 // Create a function that accepts &t, and loads, manipulates, and stores
279 // the doubles t.a ... t.f.
280 MacroAssembler assm(isolate, NULL, 0);
283 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
284 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
285 __ add_d(f8, f4, f6);
286 __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
288 __ mov_d(f10, f8); // c
289 __ neg_d(f12, f6); // -b
290 __ sub_d(f10, f10, f12);
291 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
293 __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
297 __ cvt_d_w(f14, f14); // f14 = 120.0.
298 __ mul_d(f10, f10, f14);
299 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
301 __ div_d(f12, f10, f4);
302 __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
305 __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
306 // g = sqrt(f) = 10.97451593465515908537
308 if (kArchVariant == kMips64r2) {
309 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
310 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
311 __ madd_d(f14, f6, f4, f6);
312 __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
320 Handle<Code> code = isolate->factory()->NewCode(
321 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
322 F3 f = FUNCTION_CAST<F3>(code->entry());
331 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
333 CHECK_EQ(1.5e14, t.a);
334 CHECK_EQ(1.5e14, t.b);
335 CHECK_EQ(1.50275e14, t.c);
336 CHECK_EQ(1.50550e14, t.d);
337 CHECK_EQ(1.8066e16, t.e);
338 CHECK_EQ(120.44, t.f);
339 CHECK_EQ(10.97451593465515908537, t.g);
340 if (kArchVariant == kMips64r2) {
341 CHECK_EQ(6.875, t.h);
347 // Test moves between floating point and integer registers.
348 CcTest::InitializeVM();
349 Isolate* isolate = CcTest::i_isolate();
350 HandleScope scope(isolate);
362 Assembler assm(isolate, NULL, 0);
365 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
366 __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)));
368 // Swap f4 and f5, by using 3 integer registers, a4-a6,
369 // moving the data both as two 32-bit chunks and as one 64-bit chunk.
370 // mXhc1 is mips32/64-r2 only, not r1,
371 // but we will not support r1 in practice.
380 // Store the swapped f4 and f5 back to memory.
381 __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
382 __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)));
384 // Test sign extension of move operations from coprocessor.
385 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, d)));
389 __ sd(a4, MemOperand(a0, OFFSET_OF(T, high)));
390 __ sd(a5, MemOperand(a0, OFFSET_OF(T, low)));
397 Handle<Code> code = isolate->factory()->NewCode(
398 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
399 F3 f = FUNCTION_CAST<F3>(code->entry());
404 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
407 CHECK_EQ(2.75e11, t.a);
408 CHECK_EQ(2.75e11, t.b);
409 CHECK_EQ(1.5e22, t.c);
410 CHECK_EQ(0xffffffffc25001d1L, t.high);
411 CHECK_EQ(0xffffffffbf800000L, t.low);
416 // Test conversions between doubles and integers.
417 CcTest::InitializeVM();
418 Isolate* isolate = CcTest::i_isolate();
419 HandleScope scope(isolate);
429 Assembler assm(isolate, NULL, 0);
432 // Load all structure elements to registers.
433 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
434 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
435 __ lw(a4, MemOperand(a0, OFFSET_OF(T, i)) );
436 __ lw(a5, MemOperand(a0, OFFSET_OF(T, j)) );
438 // Convert double in f4 to int in element i.
441 __ sw(a6, MemOperand(a0, OFFSET_OF(T, i)) );
443 // Convert double in f6 to int in element j.
446 __ sw(a7, MemOperand(a0, OFFSET_OF(T, j)) );
448 // Convert int in original i (a4) to double in a.
451 __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
453 // Convert int in original j (a5) to double in b.
456 __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
463 Handle<Code> code = isolate->factory()->NewCode(
464 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
465 F3 f = FUNCTION_CAST<F3>(code->entry());
470 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
473 CHECK_EQ(12345678.0, t.a);
474 CHECK_EQ(-100000.0, t.b);
475 CHECK_EQ(15000, t.i);
476 CHECK_EQ(275000000, t.j);
481 // Test simple memory loads and stores.
482 CcTest::InitializeVM();
483 Isolate* isolate = CcTest::i_isolate();
484 HandleScope scope(isolate);
498 Assembler assm(isolate, NULL, 0);
501 // Basic word load/store.
502 __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
503 __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
505 // lh with positive data.
506 __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
507 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
509 // lh with negative data.
510 __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
511 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
513 // lhu with negative data.
514 __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
515 __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
517 // lb with negative data.
518 __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
519 __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
521 // sh writes only 1/2 of word.
523 __ ori(t1, t1, 0x3333);
524 __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
525 __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
526 __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
533 Handle<Code> code = isolate->factory()->NewCode(
534 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
535 F3 f = FUNCTION_CAST<F3>(code->entry());
538 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
541 CHECK_EQ(0x11223344, t.r1);
542 CHECK_EQ(0x3344, t.r2);
543 CHECK_EQ(0xffffbbcc, t.r3);
544 CHECK_EQ(0x0000bbcc, t.r4);
545 CHECK_EQ(0xffffffcc, t.r5);
546 CHECK_EQ(0x3333bbcc, t.r6);
551 // Test floating point compare and branch instructions.
552 CcTest::InitializeVM();
553 Isolate* isolate = CcTest::i_isolate();
554 HandleScope scope(isolate);
567 // Create a function that accepts &t, and loads, manipulates, and stores
568 // the doubles t.a ... t.f.
569 MacroAssembler assm(isolate, NULL, 0);
570 Label neither_is_nan, less_than, outa_here;
572 __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
573 __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
574 if (kArchVariant != kMips64r6) {
576 __ bc1f(&neither_is_nan);
578 __ cmp(UN, L, f2, f4, f6);
579 __ bc1eqz(&neither_is_nan, f2);
582 __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
583 __ Branch(&outa_here);
585 __ bind(&neither_is_nan);
587 if (kArchVariant == kMips64r6) {
588 __ cmp(OLT, L, f2, f6, f4);
589 __ bc1nez(&less_than, f2);
591 __ c(OLT, D, f6, f4, 2);
592 __ bc1t(&less_than, 2);
596 __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
597 __ Branch(&outa_here);
600 __ Addu(a4, zero_reg, Operand(1));
601 __ sw(a4, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
604 // This test-case should have additional tests.
613 Handle<Code> code = isolate->factory()->NewCode(
614 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
615 F3 f = FUNCTION_CAST<F3>(code->entry());
623 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
625 CHECK_EQ(1.5e14, t.a);
626 CHECK_EQ(2.75e11, t.b);
627 CHECK_EQ(1, t.result);
632 // Test ROTR and ROTRV instructions.
633 CcTest::InitializeVM();
634 Isolate* isolate = CcTest::i_isolate();
635 HandleScope scope(isolate);
639 int32_t result_rotr_4;
640 int32_t result_rotr_8;
641 int32_t result_rotr_12;
642 int32_t result_rotr_16;
643 int32_t result_rotr_20;
644 int32_t result_rotr_24;
645 int32_t result_rotr_28;
646 int32_t result_rotrv_4;
647 int32_t result_rotrv_8;
648 int32_t result_rotrv_12;
649 int32_t result_rotrv_16;
650 int32_t result_rotrv_20;
651 int32_t result_rotrv_24;
652 int32_t result_rotrv_28;
656 MacroAssembler assm(isolate, NULL, 0);
659 __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );
661 // ROTR instruction (called through the Ror macro).
662 __ Ror(a5, a4, 0x0004);
663 __ Ror(a6, a4, 0x0008);
664 __ Ror(a7, a4, 0x000c);
665 __ Ror(t0, a4, 0x0010);
666 __ Ror(t1, a4, 0x0014);
667 __ Ror(t2, a4, 0x0018);
668 __ Ror(t3, a4, 0x001c);
671 __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
672 __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
673 __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
674 __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
675 __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
676 __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
677 __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
679 // ROTRV instruction (called through the Ror macro).
696 __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
697 __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
698 __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
699 __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
700 __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
701 __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
702 __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
709 Handle<Code> code = isolate->factory()->NewCode(
710 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
711 F3 f = FUNCTION_CAST<F3>(code->entry());
712 t.input = 0x12345678;
713 Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
715 CHECK_EQ(0x81234567, t.result_rotr_4);
716 CHECK_EQ(0x78123456, t.result_rotr_8);
717 CHECK_EQ(0x67812345, t.result_rotr_12);
718 CHECK_EQ(0x56781234, t.result_rotr_16);
719 CHECK_EQ(0x45678123, t.result_rotr_20);
720 CHECK_EQ(0x34567812, t.result_rotr_24);
721 CHECK_EQ(0x23456781, t.result_rotr_28);
723 CHECK_EQ(0x81234567, t.result_rotrv_4);
724 CHECK_EQ(0x78123456, t.result_rotrv_8);
725 CHECK_EQ(0x67812345, t.result_rotrv_12);
726 CHECK_EQ(0x56781234, t.result_rotrv_16);
727 CHECK_EQ(0x45678123, t.result_rotrv_20);
728 CHECK_EQ(0x34567812, t.result_rotrv_24);
729 CHECK_EQ(0x23456781, t.result_rotrv_28);
734 // Test BRANCH improvements.
735 CcTest::InitializeVM();
736 Isolate* isolate = CcTest::i_isolate();
737 HandleScope scope(isolate);
739 MacroAssembler assm(isolate, NULL, 0);
740 Label exit, exit2, exit3;
742 __ Branch(&exit, ge, a0, Operand(zero_reg));
743 __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
744 __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
754 isolate->factory()->NewCode(
755 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
760 // Test conversions between doubles and long integers.
761 // Test how the long ints map to FP reg pairs.
762 CcTest::InitializeVM();
763 Isolate* isolate = CcTest::i_isolate();
764 HandleScope scope(isolate);
774 int64_t long_as_int64;
777 int64_t b_long_as_int64;
781 Assembler assm(isolate, NULL, 0);
784 if (kArchVariant == kMips64r2) {
785 // Rewritten for FR=1 FPU mode:
786 // - 32 FP regs of 64-bits each, no odd/even pairs.
787 // - Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode.
788 // Load all structure elements to registers.
789 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
791 // Save the raw bits of the double.
794 __ sw(a4, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
795 __ sw(a5, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
797 // Convert double in f0 to long, save hi/lo parts.
799 __ mfc1(a4, f0); // f0 LS 32 bits of long.
800 __ mfhc1(a5, f0); // f0 MS 32 bits of long.
801 __ sw(a4, MemOperand(a0, OFFSET_OF(T, long_lo)));
802 __ sw(a5, MemOperand(a0, OFFSET_OF(T, long_hi)));
804 // Combine the high/low ints, convert back to double.
805 __ dsll32(a6, a5, 0); // Move a5 to high bits of a6.
809 __ sdc1(f1, MemOperand(a0, OFFSET_OF(T, a_converted)));
812 // Convert the b long integers to double b.
813 __ lw(a4, MemOperand(a0, OFFSET_OF(T, b_long_lo)));
814 __ lw(a5, MemOperand(a0, OFFSET_OF(T, b_long_hi)));
815 __ mtc1(a4, f8); // f8 LS 32-bits.
816 __ mthc1(a5, f8); // f8 MS 32-bits.
818 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
820 // Convert double b back to long-int.
821 __ ldc1(f31, MemOperand(a0, OFFSET_OF(T, b)));
822 __ cvt_l_d(f31, f31);
824 __ sd(a7, MemOperand(a0, OFFSET_OF(T, b_long_as_int64)));
832 Handle<Code> code = isolate->factory()->NewCode(
833 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
834 F3 f = FUNCTION_CAST<F3>(code->entry());
835 t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
836 t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
837 t.b_long_lo = 0x00ff00ff;
838 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
841 CHECK_EQ(0x41DFFFFF, t.dbl_exp);
842 CHECK_EQ(0xFFC00000, t.dbl_mant);
843 CHECK_EQ(0, t.long_hi);
844 CHECK_EQ(0x7fffffff, t.long_lo);
845 CHECK_EQ(2.147483647e9, t.a_converted);
847 // 0xFF00FF00FF -> 1.095233372415e12.
848 CHECK_EQ(1.095233372415e12, t.b);
849 CHECK_EQ(0xFF00FF00FF, t.b_long_as_int64);
855 // Do not run test on MIPS64r6, as these instructions are removed.
856 if (kArchVariant != kMips64r6) {
857 // Test LWL, LWR, SWL and SWR instructions.
858 CcTest::InitializeVM();
859 Isolate* isolate = CcTest::i_isolate();
860 HandleScope scope(isolate);
884 Assembler assm(isolate, NULL, 0);
886 // Test all combinations of LWL and vAddr.
887 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
888 __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
889 __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)));
891 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
892 __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
893 __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)));
895 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
896 __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
897 __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)));
899 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
900 __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
901 __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)));
903 // Test all combinations of LWR and vAddr.
904 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
905 __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
906 __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)));
908 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
909 __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
910 __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)));
912 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
913 __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
914 __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
916 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
917 __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
918 __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
920 // Test all combinations of SWL and vAddr.
921 __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
922 __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
923 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
924 __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
926 __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
927 __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)));
928 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
929 __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1));
931 __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
932 __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)));
933 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
934 __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2));
936 __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
937 __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)));
938 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
939 __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3));
941 // Test all combinations of SWR and vAddr.
942 __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
943 __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
944 __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
945 __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
947 __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
948 __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)));
949 __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
950 __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1));
952 __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
953 __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)));
954 __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
955 __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2));
957 __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
958 __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)));
959 __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
960 __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3));
967 Handle<Code> code = isolate->factory()->NewCode(
968 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
969 F3 f = FUNCTION_CAST<F3>(code->entry());
970 t.reg_init = 0xaabbccdd;
971 t.mem_init = 0x11223344;
973 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
976 CHECK_EQ(0x44bbccdd, t.lwl_0);
977 CHECK_EQ(0x3344ccdd, t.lwl_1);
978 CHECK_EQ(0x223344dd, t.lwl_2);
979 CHECK_EQ(0x11223344, t.lwl_3);
981 CHECK_EQ(0x11223344, t.lwr_0);
982 CHECK_EQ(0xaa112233, t.lwr_1);
983 CHECK_EQ(0xaabb1122, t.lwr_2);
984 CHECK_EQ(0xaabbcc11, t.lwr_3);
986 CHECK_EQ(0x112233aa, t.swl_0);
987 CHECK_EQ(0x1122aabb, t.swl_1);
988 CHECK_EQ(0x11aabbcc, t.swl_2);
989 CHECK_EQ(0xaabbccdd, t.swl_3);
991 CHECK_EQ(0xaabbccdd, t.swr_0);
992 CHECK_EQ(0xbbccdd44, t.swr_1);
993 CHECK_EQ(0xccdd3344, t.swr_2);
994 CHECK_EQ(0xdd223344, t.swr_3);
1000 CcTest::InitializeVM();
1001 Isolate* isolate = CcTest::i_isolate();
1002 HandleScope scope(isolate);
1014 MacroAssembler assm(isolate, NULL, 0);
1016 __ mov(t2, fp); // Save frame pointer.
1017 __ mov(fp, a0); // Access struct T by fp.
1018 __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)));
1019 __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)));
1021 __ addu(a5, a4, a7);
1022 __ subu(t0, a4, a7);
1024 __ push(a4); // These instructions disappear after opt.
1026 __ addu(a4, a4, a4);
1028 __ Pop(); // These instructions disappear after opt.
1031 __ push(a7); // These instructions disappear after opt.
1037 __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
1038 __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)));
1040 __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
1041 __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
1044 __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
1048 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1052 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1056 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1060 __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
1069 assm.GetCode(&desc);
1070 Handle<Code> code = isolate->factory()->NewCode(
1071 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1072 F3 f = FUNCTION_CAST<F3>(code->entry());
1080 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1088 // Test Cvt_d_uw and Trunc_uw_d macros.
1089 CcTest::InitializeVM();
1090 Isolate* isolate = CcTest::i_isolate();
1091 HandleScope scope(isolate);
1095 double cvt_small_out;
1096 uint32_t trunc_big_out;
1097 uint32_t trunc_small_out;
1098 uint32_t cvt_big_in;
1099 uint32_t cvt_small_in;
1103 MacroAssembler assm(isolate, NULL, 0);
1105 __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
1106 __ Cvt_d_uw(f10, a4, f22);
1107 __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
1109 __ Trunc_uw_d(f10, f10, f22);
1110 __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
1112 __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
1113 __ Cvt_d_uw(f8, a4, f22);
1114 __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
1116 __ Trunc_uw_d(f8, f8, f22);
1117 __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
1123 assm.GetCode(&desc);
1124 Handle<Code> code = isolate->factory()->NewCode(
1125 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1126 F3 f = FUNCTION_CAST<F3>(code->entry());
1128 t.cvt_big_in = 0xFFFFFFFF;
1129 t.cvt_small_in = 333;
1131 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1134 CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
1135 CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
1137 CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
1138 CHECK_EQ(static_cast<int>(t.trunc_small_out),
1139 static_cast<int>(t.cvt_small_in));
1144 // Test round, floor, ceil, trunc, cvt.
1145 CcTest::InitializeVM();
1146 Isolate* isolate = CcTest::i_isolate();
1147 HandleScope scope(isolate);
1149 #define ROUND_STRUCT_ELEMENT(x) \
1150 int32_t x##_up_out; \
1151 int32_t x##_down_out; \
1152 int32_t neg_##x##_up_out; \
1153 int32_t neg_##x##_down_out; \
1154 uint32_t x##_err1_out; \
1155 uint32_t x##_err2_out; \
1156 uint32_t x##_err3_out; \
1157 uint32_t x##_err4_out; \
1158 int32_t x##_invalid_result;
1162 double round_down_in;
1163 double neg_round_up_in;
1164 double neg_round_down_in;
1170 ROUND_STRUCT_ELEMENT(round)
1171 ROUND_STRUCT_ELEMENT(floor)
1172 ROUND_STRUCT_ELEMENT(ceil)
1173 ROUND_STRUCT_ELEMENT(trunc)
1174 ROUND_STRUCT_ELEMENT(cvt)
1178 #undef ROUND_STRUCT_ELEMENT
1180 MacroAssembler assm(isolate, NULL, 0);
1184 // Disable FPU exceptions.
1185 __ ctc1(zero_reg, FCSR);
1186 #define RUN_ROUND_TEST(x) \
1187 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
1188 __ x##_w_d(f0, f0); \
1189 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
1191 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
1192 __ x##_w_d(f0, f0); \
1193 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
1195 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
1196 __ x##_w_d(f0, f0); \
1197 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
1199 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
1200 __ x##_w_d(f0, f0); \
1201 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
1203 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
1204 __ ctc1(zero_reg, FCSR); \
1205 __ x##_w_d(f0, f0); \
1206 __ cfc1(a2, FCSR); \
1207 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
1209 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
1210 __ ctc1(zero_reg, FCSR); \
1211 __ x##_w_d(f0, f0); \
1212 __ cfc1(a2, FCSR); \
1213 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
1215 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
1216 __ ctc1(zero_reg, FCSR); \
1217 __ x##_w_d(f0, f0); \
1218 __ cfc1(a2, FCSR); \
1219 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
1221 __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
1222 __ ctc1(zero_reg, FCSR); \
1223 __ x##_w_d(f0, f0); \
1224 __ cfc1(a2, FCSR); \
1225 __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
1226 __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
1228 RUN_ROUND_TEST(round)
1229 RUN_ROUND_TEST(floor)
1230 RUN_ROUND_TEST(ceil)
1231 RUN_ROUND_TEST(trunc)
1241 assm.GetCode(&desc);
1242 Handle<Code> code = isolate->factory()->NewCode(
1243 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1244 F3 f = FUNCTION_CAST<F3>(code->entry());
1246 t.round_up_in = 123.51;
1247 t.round_down_in = 123.49;
1248 t.neg_round_up_in = -123.5;
1249 t.neg_round_down_in = -123.49;
1252 t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
1255 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1258 #define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
1259 #define CHECK_ROUND_RESULT(type) \
1260 CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
1261 CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
1262 CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
1263 CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
1264 CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);
1266 CHECK_ROUND_RESULT(round);
1267 CHECK_ROUND_RESULT(floor);
1268 CHECK_ROUND_RESULT(ceil);
1269 CHECK_ROUND_RESULT(cvt);
1274 // Test chaining of label usages within instructions (issue 1644).
1275 CcTest::InitializeVM();
1276 Isolate* isolate = CcTest::i_isolate();
1277 HandleScope scope(isolate);
1278 Assembler assm(isolate, NULL, 0);
1281 __ beq(v0, v1, &target);
1283 __ bne(v0, v1, &target);
1290 // ----- mips64 tests -----------------------------------------------
1293 // Test 64-bit memory loads and stores.
1294 CcTest::InitializeVM();
1295 Isolate* isolate = CcTest::i_isolate();
1296 HandleScope scope(isolate);
1310 Assembler assm(isolate, NULL, 0);
1313 // Basic 32-bit word load/store, with un-signed data.
1314 __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)));
1315 __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)));
1317 // Check that the data got zero-extended into 64-bit a4.
1318 __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)));
1320 // Basic 32-bit word load/store, with SIGNED data.
1321 __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)));
1322 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)));
1324 // Check that the data got sign-extended into 64-bit a4.
1325 __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)));
1327 // 32-bit UNSIGNED word load/store, with SIGNED data.
1328 __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)));
1329 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)));
1331 // Check that the data got zero-extended into 64-bit a4.
1332 __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)));
1334 // lh with positive data.
1335 __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)));
1336 __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)));
1338 // lh with negative data.
1339 __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)));
1340 __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)));
1342 // lhu with negative data.
1343 __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)));
1344 __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)));
1346 // lb with negative data.
1347 __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)));
1348 __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)));
1350 // sh writes only 1/2 of word.
1352 __ ori(t1, t1, 0x3333);
1353 __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)));
1354 __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)));
1355 __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)));
1361 assm.GetCode(&desc);
1362 Handle<Code> code = isolate->factory()->NewCode(
1363 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1364 F3 f = FUNCTION_CAST<F3>(code->entry());
1367 t.r1 = 0x1111111111111111;
1368 t.r2 = 0x2222222222222222;
1369 t.r3 = 0x3333333333333333;
1370 t.r4 = 0x4444444444444444;
1371 t.r5 = 0x5555555555555555;
1372 t.r6 = 0x6666666666666666;
1373 Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1376 // Unsigned data, 32 & 64.
1377 CHECK_EQ(0x1111111144332211L, t.r1);
1378 CHECK_EQ(0x0000000000002211L, t.r2);
1380 // Signed data, 32 & 64.
1381 CHECK_EQ(0x33333333ffffbbccL, t.r3);
1382 CHECK_EQ(0xffffffff0000bbccL, t.r4);
1384 // Signed data, 32 & 64.
1385 CHECK_EQ(0x55555555ffffffccL, t.r5);
1386 CHECK_EQ(0x000000003333bbccL, t.r6);