1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS64
9 #include "src/codegen.h"
10 #include "src/macro-assembler.h"
11 #include "src/mips64/simulator-mips64.h"
20 #if defined(USE_SIMULATOR)
21 byte* fast_exp_mips_machine_code = NULL;
22 double fast_exp_simulator(double x) {
23 return Simulator::current(Isolate::Current())->CallFP(
24 fast_exp_mips_machine_code, x, 0);
29 UnaryMathFunction CreateExpFunction() {
30 if (!FLAG_fast_math) return &std::exp;
33 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
34 if (buffer == NULL) return &std::exp;
35 ExternalReference::InitializeMathExpData();
37 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
40 DoubleRegister input = f12;
41 DoubleRegister result = f0;
42 DoubleRegister double_scratch1 = f4;
43 DoubleRegister double_scratch2 = f6;
48 if (!IsMipsSoftFloatABI) {
49 // Input value is in f12 anyway, nothing to do.
51 __ Move(input, a0, a1);
53 __ Push(temp3, temp2, temp1);
54 MathExpGenerator::EmitMathExp(
55 &masm, input, result, double_scratch1, double_scratch2,
57 __ Pop(temp3, temp2, temp1);
58 if (!IsMipsSoftFloatABI) {
59 // Result is already in f0, nothing to do.
61 __ Move(v0, v1, result);
68 DCHECK(!RelocInfo::RequiresRelocation(desc));
70 CpuFeatures::FlushICache(buffer, actual_size);
71 base::OS::ProtectCode(buffer, actual_size);
73 #if !defined(USE_SIMULATOR)
74 return FUNCTION_CAST<UnaryMathFunction>(buffer);
76 fast_exp_mips_machine_code = buffer;
77 return &fast_exp_simulator;
82 #if defined(V8_HOST_ARCH_MIPS)
83 MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
84 #if defined(USE_SIMULATOR)
90 static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
91 if (buffer == NULL) return stub;
93 // This code assumes that cache lines are 32 bytes; if the cache line is
94 // larger, it will not work correctly.
95 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
98 Label lastb, unaligned, aligned, chkw,
99 loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
100 leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
101 ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
103 // The size of each prefetch.
104 uint32_t pref_chunk = 32;
105 // The maximum size of a prefetch; it must not be less than pref_chunk.
106 // If the real size of a prefetch is greater than max_pref_size and
107 // the kPrefHintPrepareForStore hint is used, the code will not work
109 uint32_t max_pref_size = 128;
110 DCHECK(pref_chunk < max_pref_size);
112 // pref_limit is set based on the fact that we never use an offset
113 // greater than 5 * pref_chunk on a store pref and that a single pref
114 // can never be larger than max_pref_size.
115 uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
116 int32_t pref_hint_load = kPrefHintLoadStreamed;
117 int32_t pref_hint_store = kPrefHintPrepareForStore;
118 uint32_t loadstore_chunk = 4;
120 // The initial prefetches may fetch bytes that are before the buffer being
121 // copied. Start copies with an offset of 4 to avoid this situation when
122 // using kPrefHintPrepareForStore.
123 DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
124 pref_chunk * 4 >= max_pref_size);
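// Illustrative arithmetic for the values above (a sketch, not emitted code):
// with pref_chunk = 32 and max_pref_size = 128, pref_limit = 5 * 32 + 128 =
// 288, so the prepare-for-store prefetches are skipped once the destination
// pointer is within 288 bytes of the end of the buffer.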
125 // If the size is less than 8, go to lastb. Regardless of size,
126 // copy the dst pointer to v0 for the return value.
127 __ slti(a6, a2, 2 * loadstore_chunk);
128 __ bne(a6, zero_reg, &lastb);
129 __ mov(v0, a0); // In delay slot.
131 // If src and dst have different alignments, go to unaligned, if they
132 // have the same alignment (but are not actually aligned) do a partial
133 // load/store to make them aligned. If they are both already aligned
134 // we can start copying at aligned.
136 __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
137 __ bne(t8, zero_reg, &unaligned);
138 __ subu(a3, zero_reg, a0); // In delay slot.
140 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
141 __ beq(a3, zero_reg, &aligned); // Already aligned.
142 __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
144 __ lwr(t8, MemOperand(a1));
146 __ swr(t8, MemOperand(a0));
149 // Now dst/src are both word-aligned. Set a2 to count how many bytes we
150 // have to copy after all the 64-byte chunks are copied and a3 to the dst
151 // pointer after all the 64-byte chunks have been copied. We will loop,
152 // incrementing a0 and a1 until a0 equals a3.
154 __ andi(t8, a2, 0x3f);
155 __ beq(a2, t8, &chkw); // Less than 64?
156 __ subu(a3, a2, t8); // In delay slot.
157 __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
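// In rough C terms (illustrative only): t8 = count & 0x3f is the number of
// bytes left over after the 64-byte chunks, and a3 = dst + (count - t8) is
// the first destination address past the last full chunk.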
159 // When in the loop we prefetch with the kPrefHintPrepareForStore hint,
160 // the address a0+x must not be past the "a4-32" address. This means:
161 // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
162 // x=64 the last "safe" a0 address is "a4-96". In the current version we
163 // will use "pref hint, 128(a0)", so "a4-160" is the limit.
164 if (pref_hint_store == kPrefHintPrepareForStore) {
165 __ addu(a4, a0, a2); // a4 is the "past the end" address.
166 __ Subu(t9, a4, pref_limit); // t9 is the "last safe pref" address.
169 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
170 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
171 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
172 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
174 if (pref_hint_store != kPrefHintPrepareForStore) {
175 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
176 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
177 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
180 __ lw(a4, MemOperand(a1));
182 if (pref_hint_store == kPrefHintPrepareForStore) {
183 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
184 __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
186 __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
188 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
189 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
192 __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
193 __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
194 __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
195 __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
196 __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
197 __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
198 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
200 __ sw(a4, MemOperand(a0));
201 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
202 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
203 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
204 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
205 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
206 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
207 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
209 __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
210 __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
211 __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
212 __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
213 __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
214 __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
215 __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
216 __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
217 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
219 __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
220 __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
221 __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
222 __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
223 __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
224 __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
225 __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
226 __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
227 __ addiu(a0, a0, 16 * loadstore_chunk);
228 __ bne(a0, a3, &loop16w);
229 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
232 // Here we have src and dst word-aligned but less than 64 bytes to go.
233 // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
234 // down to chk1w to handle the tail end of the copy.
236 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
237 __ andi(t8, a2, 0x1f);
238 __ beq(a2, t8, &chk1w); // Less than 32?
239 __ nop(); // In delay slot.
240 __ lw(a4, MemOperand(a1));
241 __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
242 __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
243 __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
244 __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
245 __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
246 __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
247 __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
248 __ addiu(a1, a1, 8 * loadstore_chunk);
249 __ sw(a4, MemOperand(a0));
250 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
251 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
252 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
253 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
254 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
255 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
256 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
257 __ addiu(a0, a0, 8 * loadstore_chunk);
259 // Here we have less than 32 bytes to copy. Set up for a loop to copy
260 // one word at a time. Set a2 to count how many bytes we have to copy
261 // after all the word chunks are copied and a3 to the dst pointer after
262 // all the word chunks have been copied. We will loop, incrementing a0
263 // and a1 until a0 equals a3.
265 __ andi(a2, t8, loadstore_chunk - 1);
266 __ beq(a2, t8, &lastb);
267 __ subu(a3, t8, a2); // In delay slot.
270 __ bind(&wordCopy_loop);
271 __ lw(a7, MemOperand(a1));
272 __ addiu(a0, a0, loadstore_chunk);
273 __ addiu(a1, a1, loadstore_chunk);
274 __ bne(a0, a3, &wordCopy_loop);
275 __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
278 __ Branch(&leave, le, a2, Operand(zero_reg));
282 __ lb(v1, MemOperand(a1));
285 __ bne(a0, a3, &lastbloop);
286 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
292 // Unaligned case. Only the dst gets aligned so we need to do partial
293 // loads of the source followed by normal stores to the dst (once we
294 // have aligned the destination).
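// Reminder on the partial loads below (informal, not a spec): an lwr/lwl
// pair addressed at the two ends of the same unaligned word loads its two
// parts into one register, so together they assemble a full 32-bit word
// from an unaligned source address without taking an alignment fault. The
// exact byte placement depends on endianness.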
296 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
297 __ beq(a3, zero_reg, &ua_chk16w);
298 __ subu(a2, a2, a3); // In delay slot.
300 __ lwr(v1, MemOperand(a1));
302 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
304 __ swr(v1, MemOperand(a0));
307 // Now the dst (but not the source) is aligned. Set a2 to count how many
308 // bytes we have to copy after all the 64 byte chunks are copied and a3 to
309 // the dst pointer after all the 64 byte chunks have been copied. We will
310 // loop, incrementing a0 and a1 until a0 equals a3.
312 __ andi(t8, a2, 0x3f);
313 __ beq(a2, t8, &ua_chkw);
314 __ subu(a3, a2, t8); // In delay slot.
317 if (pref_hint_store == kPrefHintPrepareForStore) {
319 __ Subu(t9, a4, pref_limit);
322 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
323 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
324 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
326 if (pref_hint_store != kPrefHintPrepareForStore) {
327 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
328 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
329 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
332 __ bind(&ua_loop16w);
333 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
334 __ lwr(a4, MemOperand(a1));
335 __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
336 __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
338 if (pref_hint_store == kPrefHintPrepareForStore) {
340 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
342 __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
344 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
345 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
347 __ bind(&ua_skip_pref);
348 __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
349 __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
350 __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
351 __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
353 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
355 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
357 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
359 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
361 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
363 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
365 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
367 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
368 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
369 __ sw(a4, MemOperand(a0));
370 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
371 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
372 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
373 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
374 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
375 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
376 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
377 __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
378 __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
379 __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
380 __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
381 __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
382 __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
383 __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
384 __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
386 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
388 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
390 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
392 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
394 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
396 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
398 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
400 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
401 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
402 __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
403 __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
404 __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
405 __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
406 __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
407 __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
408 __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
409 __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
410 __ addiu(a0, a0, 16 * loadstore_chunk);
411 __ bne(a0, a3, &ua_loop16w);
412 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
415 // Here we have less than 64 bytes to copy. Check for
416 // a 32-byte chunk and copy it if there is one. Otherwise jump down to
417 // ua_chk1w to handle the tail end of the copy.
419 __ Pref(pref_hint_load, MemOperand(a1));
420 __ andi(t8, a2, 0x1f);
422 __ beq(a2, t8, &ua_chk1w);
423 __ nop(); // In delay slot.
424 __ lwr(a4, MemOperand(a1));
425 __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
426 __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
427 __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
428 __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
429 __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
430 __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
431 __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
433 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
435 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
437 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
439 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
441 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
443 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
445 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
447 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
448 __ addiu(a1, a1, 8 * loadstore_chunk);
449 __ sw(a4, MemOperand(a0));
450 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
451 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
452 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
453 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
454 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
455 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
456 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
457 __ addiu(a0, a0, 8 * loadstore_chunk);
459 // Less than 32 bytes to copy. Set up for a loop to
460 // copy one word at a time.
462 __ andi(a2, t8, loadstore_chunk - 1);
463 __ beq(a2, t8, &ua_smallCopy);
464 __ subu(a3, t8, a2); // In delay slot.
467 __ bind(&ua_wordCopy_loop);
468 __ lwr(v1, MemOperand(a1));
470 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
471 __ addiu(a0, a0, loadstore_chunk);
472 __ addiu(a1, a1, loadstore_chunk);
473 __ bne(a0, a3, &ua_wordCopy_loop);
474 __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
476 // Copy the last 8 bytes.
477 __ bind(&ua_smallCopy);
478 __ beq(a2, zero_reg, &leave);
479 __ addu(a3, a0, a2); // In delay slot.
481 __ bind(&ua_smallCopy_loop);
482 __ lb(v1, MemOperand(a1));
485 __ bne(a0, a3, &ua_smallCopy_loop);
486 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
493 DCHECK(!RelocInfo::RequiresRelocation(desc));
495 CpuFeatures::FlushICache(buffer, actual_size);
496 base::OS::ProtectCode(buffer, actual_size);
497 return FUNCTION_CAST<MemCopyUint8Function>(buffer);
502 UnaryMathFunction CreateSqrtFunction() {
503 #if defined(USE_SIMULATOR)
508 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
509 if (buffer == NULL) return &std::sqrt;
511 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
513 __ MovFromFloatParameter(f12);
515 __ MovToFloatResult(f0);
520 DCHECK(!RelocInfo::RequiresRelocation(desc));
522 CpuFeatures::FlushICache(buffer, actual_size);
523 base::OS::ProtectCode(buffer, actual_size);
524 return FUNCTION_CAST<UnaryMathFunction>(buffer);
531 // -------------------------------------------------------------------------
532 // Platform-specific RuntimeCallHelper functions.
534 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
535 masm->EnterFrame(StackFrame::INTERNAL);
536 DCHECK(!masm->has_frame());
537 masm->set_has_frame(true);
541 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
542 masm->LeaveFrame(StackFrame::INTERNAL);
543 DCHECK(masm->has_frame());
544 masm->set_has_frame(false);
548 // -------------------------------------------------------------------------
551 #define __ ACCESS_MASM(masm)
553 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
554 MacroAssembler* masm,
559 AllocationSiteMode mode,
560 Label* allocation_memento_found) {
561 Register scratch_elements = a4;
562 DCHECK(!AreAliased(receiver, key, value, target_map,
565 if (mode == TRACK_ALLOCATION_SITE) {
566 __ JumpIfJSArrayHasAllocationMemento(
567 receiver, scratch_elements, allocation_memento_found);
570 // Set transitioned map.
571 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
572 __ RecordWriteField(receiver,
573 HeapObject::kMapOffset,
583 void ElementsTransitionGenerator::GenerateSmiToDouble(
584 MacroAssembler* masm,
589 AllocationSiteMode mode,
591 // Register ra contains the return address.
592 Label loop, entry, convert_hole, gc_required, only_change_map, done;
593 Register elements = a4;
594 Register length = a5;
596 Register array_end = array;
598 // target_map parameter can be clobbered.
599 Register scratch1 = target_map;
600 Register scratch2 = t1;
601 Register scratch3 = a7;
603 // Verify input registers don't conflict with locals.
604 DCHECK(!AreAliased(receiver, key, value, target_map,
605 elements, length, array, scratch2));
607 Register scratch = t2;
608 if (mode == TRACK_ALLOCATION_SITE) {
609 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
612 // Check for empty arrays, which only require a map transition and no changes
613 // to the backing store.
614 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
615 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
616 __ Branch(&only_change_map, eq, at, Operand(elements));
619 __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
620 // elements: source FixedArray
621 // length: number of elements (smi-tagged)
623 // Allocate new FixedDoubleArray.
624 __ SmiScale(scratch, length, kDoubleSizeLog2);
625 __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
626 __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
627 // array: destination FixedDoubleArray, not tagged as heap object
629 // Set destination FixedDoubleArray's length and map.
630 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
631 __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
632 // Update receiver's map.
633 __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
635 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
636 __ RecordWriteField(receiver,
637 HeapObject::kMapOffset,
644 // Replace receiver's backing store with newly created FixedDoubleArray.
645 __ Daddu(scratch1, array, Operand(kHeapObjectTag));
646 __ sd(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
647 __ RecordWriteField(receiver,
648 JSObject::kElementsOffset,
657 // Prepare for conversion loop.
658 __ Daddu(scratch1, elements,
659 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
660 __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
661 __ SmiScale(array_end, length, kDoubleSizeLog2);
662 __ Daddu(array_end, array_end, scratch3);
664 // Repurpose registers no longer in use.
665 Register hole_lower = elements;
666 Register hole_upper = length;
667 __ li(hole_lower, Operand(kHoleNanLower32));
668 // scratch1: begin of source FixedArray element fields, not tagged
669 // hole_lower: kHoleNanLower32
670 // hole_upper: kHoleNanUpper32
671 // array_end: end of destination FixedDoubleArray, not tagged
672 // scratch3: begin of FixedDoubleArray element fields, not tagged
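// The loop below does, roughly (illustrative pseudocode, not the emitted
// code): for each source slot, if the value is a smi, untag it, convert it
// to a double and store it; otherwise it must be the hole, so store the
// canonical hole NaN bit pattern (hole_upper:hole_lower) instead.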
673 __ Branch(USE_DELAY_SLOT, &entry);
674 __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot.
676 __ bind(&only_change_map);
677 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
678 __ RecordWriteField(receiver,
679 HeapObject::kMapOffset,
688 // Call into runtime if GC is required.
689 __ bind(&gc_required);
690 __ ld(ra, MemOperand(sp, 0));
691 __ Branch(USE_DELAY_SLOT, fail);
692 __ daddiu(sp, sp, kPointerSize); // In delay slot.
694 // Convert and copy elements.
696 __ ld(scratch2, MemOperand(scratch1));
697 __ Daddu(scratch1, scratch1, kIntSize);
698 // scratch2: current element
699 __ JumpIfNotSmi(scratch2, &convert_hole);
700 __ SmiUntag(scratch2);
702 // Normal smi, convert to double and store.
703 __ mtc1(scratch2, f0);
705 __ sdc1(f0, MemOperand(scratch3));
706 __ Branch(USE_DELAY_SLOT, &entry);
707 __ daddiu(scratch3, scratch3, kDoubleSize); // In delay slot.
709 // Hole found, store the-hole NaN.
710 __ bind(&convert_hole);
711 if (FLAG_debug_code) {
712 // Restore a "smi-untagged" heap object.
713 __ Or(scratch2, scratch2, Operand(1));
714 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
715 __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
718 __ sw(hole_lower, MemOperand(scratch3));
720 __ sw(hole_upper, MemOperand(scratch3, kIntSize));
721 __ Daddu(scratch3, scratch3, kDoubleSize);
724 __ Branch(&loop, lt, scratch3, Operand(array_end));
731 void ElementsTransitionGenerator::GenerateDoubleToObject(
732 MacroAssembler* masm,
737 AllocationSiteMode mode,
739 // Register ra contains the return address.
740 Label entry, loop, convert_hole, gc_required, only_change_map;
741 Register elements = a4;
743 Register length = a5;
744 Register scratch = t1;
746 // Verify input registers don't conflict with locals.
747 DCHECK(!AreAliased(receiver, key, value, target_map,
748 elements, array, length, scratch));
749 if (mode == TRACK_ALLOCATION_SITE) {
750 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
753 // Check for empty arrays, which only require a map transition and no changes
754 // to the backing store.
755 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
756 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
757 __ Branch(&only_change_map, eq, at, Operand(elements));
760 value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
762 __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
763 // elements: source FixedArray
764 // length: number of elements (smi-tagged)
766 // Allocate new FixedArray.
767 // Re-use value and target_map registers, as they have been saved on the
769 Register array_size = value;
770 Register allocate_scratch = target_map;
771 __ SmiScale(array_size, length, kPointerSizeLog2);
772 __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
773 __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
774 NO_ALLOCATION_FLAGS);
775 // array: destination FixedArray, not tagged as heap object
776 // Set destination FixedDoubleArray's length and map.
777 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
778 __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
779 __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
781 // Prepare for conversion loop.
782 Register src_elements = elements;
783 Register dst_elements = target_map;
784 Register dst_end = length;
785 Register heap_number_map = scratch;
786 __ Daddu(src_elements, src_elements,
787 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
788 __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
789 __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
790 __ Daddu(dst_end, dst_elements, dst_end);
792 // Allocating heap numbers in the loop below can fail and cause a jump to
793 // gc_required. We can't leave a partly initialized FixedArray behind,
794 // so pessimistically fill it with holes now.
795 Label initialization_loop, initialization_loop_entry;
796 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
797 __ Branch(&initialization_loop_entry);
798 __ bind(&initialization_loop);
799 __ sd(scratch, MemOperand(dst_elements));
800 __ Daddu(dst_elements, dst_elements, Operand(kPointerSize));
801 __ bind(&initialization_loop_entry);
802 __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
804 __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
805 __ Daddu(array, array, Operand(kHeapObjectTag));
806 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
807 // Using offset addresses.
808 // dst_elements: begin of destination FixedArray element fields, not tagged
809 // src_elements: begin of source FixedDoubleArray element fields, not tagged,
810 // points to the exponent
811 // dst_end: end of destination FixedArray, not tagged
812 // array: destination FixedArray
813 // heap_number_map: heap number map
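// Per element, the loop below roughly does (illustrative pseudocode): read
// the upper 32 bits of the double; if they equal kHoleNanUpper32, store the
// the-hole sentinel; otherwise allocate a new HeapNumber, copy the double's
// mantissa and exponent words into it, store the tagged pointer into the
// FixedArray and emit a write barrier for it.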
816 // Call into runtime if GC is required.
817 __ bind(&gc_required);
819 value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
824 Register upper_bits = key;
825 __ lw(upper_bits, MemOperand(src_elements));
826 __ Daddu(src_elements, src_elements, kDoubleSize);
827 // upper_bits: current element's upper 32 bits
828 // src_elements: address of next element's upper 32 bits
829 __ Branch(&convert_hole, eq, upper_bits, Operand(kHoleNanUpper32));
831 // Non-hole double, copy value into a heap number.
832 Register heap_number = receiver;
833 Register scratch2 = value;
834 Register scratch3 = t2;
835 __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
837 // heap_number: new heap number
838 // Load the mantissa of the current element; src_elements
839 // points to the exponent of the next element.
840 __ lw(scratch2, MemOperand(src_elements, -12));
841 __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
842 __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
843 __ mov(scratch2, dst_elements);
844 __ sd(heap_number, MemOperand(dst_elements));
845 __ Daddu(dst_elements, dst_elements, kPointerSize);
846 __ RecordWrite(array,
855 // Replace the-hole NaN with the-hole pointer.
856 __ bind(&convert_hole);
857 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
858 __ sd(scratch2, MemOperand(dst_elements));
859 __ Daddu(dst_elements, dst_elements, kPointerSize);
862 __ Branch(&loop, lt, dst_elements, Operand(dst_end));
864 __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
865 // Replace receiver's backing store with newly created and filled FixedArray.
866 __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
867 __ RecordWriteField(receiver,
868 JSObject::kElementsOffset,
877 __ bind(&only_change_map);
878 // Update receiver's map.
879 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
880 __ RecordWriteField(receiver,
881 HeapObject::kMapOffset,
891 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
895 Label* call_runtime) {
896 // Fetch the instance type of the receiver into result register.
897 __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
898 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
900 // We need special handling for indirect strings.
901 Label check_sequential;
902 __ And(at, result, Operand(kIsIndirectStringMask));
903 __ Branch(&check_sequential, eq, at, Operand(zero_reg));
905 // Dispatch on the indirect string shape: slice or cons.
907 __ And(at, result, Operand(kSlicedNotConsMask));
908 __ Branch(&cons_string, eq, at, Operand(zero_reg));
911 Label indirect_string_loaded;
912 __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
913 __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
914 __ dsra32(at, result, 0);
915 __ Daddu(index, index, at);
916 __ jmp(&indirect_string_loaded);
918 // Handle cons strings.
919 // Check whether the right hand side is the empty string (i.e. if
920 // this is really a flat string in a cons string). If that is not
921 // the case we would rather go to the runtime system now to flatten
923 __ bind(&cons_string);
924 __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
925 __ LoadRoot(at, Heap::kempty_stringRootIndex);
926 __ Branch(call_runtime, ne, result, Operand(at));
927 // Get the first of the two strings and load its instance type.
928 __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
930 __ bind(&indirect_string_loaded);
931 __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
932 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
934 // Distinguish sequential and external strings. Only these two string
935 // representations can reach here (slices and flat cons strings have been
936 // reduced to the underlying sequential or external string).
937 Label external_string, check_encoding;
938 __ bind(&check_sequential);
939 STATIC_ASSERT(kSeqStringTag == 0);
940 __ And(at, result, Operand(kStringRepresentationMask));
941 __ Branch(&external_string, ne, at, Operand(zero_reg));
943 // Prepare sequential strings
944 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
947 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
948 __ jmp(&check_encoding);
950 // Handle external strings.
951 __ bind(&external_string);
952 if (FLAG_debug_code) {
953 // Assert that we do not have a cons or slice (indirect strings) here.
954 // Sequential strings have already been ruled out.
955 __ And(at, result, Operand(kIsIndirectStringMask));
956 __ Assert(eq, kExternalStringExpectedButNotFound,
957 at, Operand(zero_reg));
959 // Rule out short external strings.
960 STATIC_ASSERT(kShortExternalStringTag != 0);
961 __ And(at, result, Operand(kShortExternalStringMask));
962 __ Branch(call_runtime, ne, at, Operand(zero_reg));
963 __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
965 Label one_byte, done;
966 __ bind(&check_encoding);
967 STATIC_ASSERT(kTwoByteStringTag == 0);
968 __ And(at, result, Operand(kStringEncodingMask));
969 __ Branch(&one_byte, ne, at, Operand(zero_reg));
971 __ dsll(at, index, 1);
972 __ Daddu(at, string, at);
973 __ lhu(result, MemOperand(at));
977 __ Daddu(at, string, index);
978 __ lbu(result, MemOperand(at));
983 static MemOperand ExpConstant(int index, Register base) {
984 return MemOperand(base, index * kDoubleSize);
988 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
989 DoubleRegister input,
990 DoubleRegister result,
991 DoubleRegister double_scratch1,
992 DoubleRegister double_scratch2,
996 DCHECK(!input.is(result));
997 DCHECK(!input.is(double_scratch1));
998 DCHECK(!input.is(double_scratch2));
999 DCHECK(!result.is(double_scratch1));
1000 DCHECK(!result.is(double_scratch2));
1001 DCHECK(!double_scratch1.is(double_scratch2));
1002 DCHECK(!temp1.is(temp2));
1003 DCHECK(!temp1.is(temp3));
1004 DCHECK(!temp2.is(temp3));
1005 DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
1006 DCHECK(!masm->serializer_enabled()); // External references not serializable.
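// Overview (informal, inferred from the constants used below rather than a
// spec): the input is first checked against lower/upper bounds from the
// constants table (underflow to 0, overflow to +infinity). Otherwise exp(x)
// is computed by range reduction: x is scaled so that its integer part
// selects an entry of a precomputed power-of-two table
// (math_exp_log_table), a short polynomial evaluates the correction for the
// remainder, and the biased integer part is finally patched into the IEEE
// exponent bits of the result.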
1008 Label zero, infinity, done;
1009 __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
1011 __ ldc1(double_scratch1, ExpConstant(0, temp3));
1012 __ BranchF(&zero, NULL, ge, double_scratch1, input);
1014 __ ldc1(double_scratch2, ExpConstant(1, temp3));
1015 __ BranchF(&infinity, NULL, ge, input, double_scratch2);
1017 __ ldc1(double_scratch1, ExpConstant(3, temp3));
1018 __ ldc1(result, ExpConstant(4, temp3));
1019 __ mul_d(double_scratch1, double_scratch1, input);
1020 __ add_d(double_scratch1, double_scratch1, result);
1021 __ FmoveLow(temp2, double_scratch1);
1022 __ sub_d(double_scratch1, double_scratch1, result);
1023 __ ldc1(result, ExpConstant(6, temp3));
1024 __ ldc1(double_scratch2, ExpConstant(5, temp3));
1025 __ mul_d(double_scratch1, double_scratch1, double_scratch2);
1026 __ sub_d(double_scratch1, double_scratch1, input);
1027 __ sub_d(result, result, double_scratch1);
1028 __ mul_d(double_scratch2, double_scratch1, double_scratch1);
1029 __ mul_d(result, result, double_scratch2);
1030 __ ldc1(double_scratch2, ExpConstant(7, temp3));
1031 __ mul_d(result, result, double_scratch2);
1032 __ sub_d(result, result, double_scratch1);
1033 // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
1034 DCHECK(*reinterpret_cast<double*>
1035 (ExternalReference::math_exp_constants(8).address()) == 1);
1036 __ Move(double_scratch2, 1);
1037 __ add_d(result, result, double_scratch2);
1038 __ dsrl(temp1, temp2, 11);
1039 __ Ext(temp2, temp2, 0, 11);
1040 __ Daddu(temp1, temp1, Operand(0x3ff));
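// At this point (informally): temp2 holds the low 11 bits of the scaled
// input, used below as an index into the 2048-entry log table, while temp1
// holds the remaining high bits plus the IEEE double exponent bias (0x3ff);
// temp1 is later shifted into the exponent field of the result.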
1042 // Must not call ExpConstant() after overwriting temp3!
1043 __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1044 __ dsll(at, temp2, 3);
1045 __ Daddu(temp3, temp3, Operand(at));
1046 __ lwu(temp2, MemOperand(temp3, 0));
1047 __ lwu(temp3, MemOperand(temp3, kIntSize));
1048 // The first word loaded is in the lower-numbered register.
1049 if (temp2.code() < temp3.code()) {
1050 __ dsll(at, temp1, 20);
1051 __ Or(temp1, temp3, at);
1052 __ Move(double_scratch1, temp2, temp1);
1054 __ dsll(at, temp1, 20);
1055 __ Or(temp1, temp2, at);
1056 __ Move(double_scratch1, temp3, temp1);
1058 __ mul_d(result, result, double_scratch1);
1059 __ BranchShort(&done);
1062 __ Move(result, kDoubleRegZero);
1063 __ BranchShort(&done);
1066 __ ldc1(result, ExpConstant(2, temp3));
1072 // nop(CODE_AGE_MARKER_NOP)
1073 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
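// When a code object is aged, the young prologue is overwritten with a call
// to a code-age stub; kCodeAgePatchFirstInstruction is the first instruction
// of that patched sequence and is what IsOld() matches below.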
1077 CodeAgingHelper::CodeAgingHelper() {
1078 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
1079 // Since patcher is a large object, allocate it dynamically when needed,
1080 // to avoid overloading the stack in stress conditions.
1081 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
1082 // the process, before the MIPS simulator ICache is set up.
1083 SmartPointer<CodePatcher> patcher(
1084 new CodePatcher(young_sequence_.start(),
1085 young_sequence_.length() / Assembler::kInstrSize,
1086 CodePatcher::DONT_FLUSH));
1087 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
1088 patcher->masm()->Push(ra, fp, cp, a1);
1089 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1090 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1091 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1092 patcher->masm()->Daddu(
1093 fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1098 bool CodeAgingHelper::IsOld(byte* candidate) const {
1099 return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
1104 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
1105 bool result = isolate->code_aging_helper()->IsYoung(sequence);
1106 DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
1111 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
1112 MarkingParity* parity) {
1113 if (IsYoungSequence(isolate, sequence)) {
1114 *age = kNoAgeCodeAge;
1115 *parity = NO_MARKING_PARITY;
1117 Address target_address = Assembler::target_address_at(
1118 sequence + Assembler::kInstrSize);
1119 Code* stub = GetCodeFromTargetAddress(target_address);
1120 GetCodeAgeAndParity(stub, age, parity);
1125 void Code::PatchPlatformCodeAge(Isolate* isolate,
1128 MarkingParity parity) {
1129 uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
1130 if (age == kNoAgeCodeAge) {
1131 isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
1132 CpuFeatures::FlushICache(sequence, young_length);
1134 Code* stub = GetCodeAgeStub(isolate, age, parity);
1135 CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
1136 // Mark this code sequence for FindPlatformCodeAgeSequence().
1137 patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
1138 // Load the stub address into t9 and call it;
1139 // GetCodeAgeAndParity() extracts the stub address from this instruction.
1142 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
1144 patcher.masm()->nop(); // Prevent jalr to jal optimization.
1145 patcher.masm()->jalr(t9, a0);
1146 patcher.masm()->nop(); // Branch delay slot nop.
1147 patcher.masm()->nop(); // Pad the empty space.
1154 } } // namespace v8::internal
1156 #endif // V8_TARGET_ARCH_MIPS64