1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS
10 #include "macro-assembler.h"
11 #include "simulator-mips.h"
20 #if defined(USE_SIMULATOR)
21 byte* fast_exp_mips_machine_code = NULL;
22 double fast_exp_simulator(double x) {
23 return Simulator::current(Isolate::Current())->CallFP(
24 fast_exp_mips_machine_code, x, 0);
29 UnaryMathFunction CreateExpFunction() {
30 if (!FLAG_fast_math) return &std::exp;
31 size_t actual_size;
32 byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
33 if (buffer == NULL) return &std::exp;
34 ExternalReference::InitializeMathExpData();
36 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
39 DoubleRegister input = f12;
40 DoubleRegister result = f0;
41 DoubleRegister double_scratch1 = f4;
42 DoubleRegister double_scratch2 = f6;
47 __ MovFromFloatParameter(input);
48 __ Push(temp3, temp2, temp1);
49 MathExpGenerator::EmitMathExp(
50 &masm, input, result, double_scratch1, double_scratch2,
51 temp1, temp2, temp3);
52 __ Pop(temp3, temp2, temp1);
53 __ MovToFloatResult(result);
59 ASSERT(!RelocInfo::RequiresRelocation(desc));
61 CPU::FlushICache(buffer, actual_size);
62 OS::ProtectCode(buffer, actual_size);
64 #if !defined(USE_SIMULATOR)
65 return FUNCTION_CAST<UnaryMathFunction>(buffer);
67 fast_exp_mips_machine_code = buffer;
68 return &fast_exp_simulator;
73 #if defined(V8_HOST_ARCH_MIPS)
74 OS::MemCopyUint8Function CreateMemCopyUint8Function(
75 OS::MemCopyUint8Function stub) {
76 #if defined(USE_SIMULATOR)
77 return stub;
78 #else
79 size_t actual_size;
80 byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
81 if (buffer == NULL) return stub;
83 // This code assumes that cache lines are 32 bytes; if the cache line is
84 // larger, the code will not work correctly.
85 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
88 Label lastb, unaligned, aligned, chkw,
89 loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
90 leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
91 ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
93 // The size of each prefetch.
94 uint32_t pref_chunk = 32;
95 // The maximum size of a prefetch; it must not be less than pref_chunk.
96 // If the real size of a prefetch is greater than max_pref_size and
97 // the kPrefHintPrepareForStore hint is used, the code will not work
98 // correctly.
99 uint32_t max_pref_size = 128;
100 ASSERT(pref_chunk < max_pref_size);
102 // pref_limit is set based on the fact that we never use an offset
103 // greater than 5 on a store pref and that a single pref can
104 // never be larger than max_pref_size.
105 uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
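// With the values above, pref_limit == (5 * 32) + 128 == 288 bytes.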
106 int32_t pref_hint_load = kPrefHintLoadStreamed;
107 int32_t pref_hint_store = kPrefHintPrepareForStore;
108 uint32_t loadstore_chunk = 4;
110 // The initial prefetches may fetch bytes that are before the buffer being
111 // copied. Start copies with an offset of 4 to avoid this situation when
112 // using kPrefHintPrepareForStore.
113 ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
114 pref_chunk * 4 >= max_pref_size);
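// With pref_chunk == 32 and max_pref_size == 128 this holds exactly:
// 4 * 32 == 128.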
116 // If the size is less than 8, go to lastb. Regardless of size,
117 // copy the dst pointer to v0 for the return value.
118 __ slti(t2, a2, 2 * loadstore_chunk);
119 __ bne(t2, zero_reg, &lastb);
120 __ mov(v0, a0); // In delay slot.
122 // If src and dst have different alignments, go to unaligned, if they
123 // have the same alignment (but are not actually aligned) do a partial
124 // load/store to make them aligned. If they are both already aligned
125 // we can start copying at aligned.
127 __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
128 __ bne(t8, zero_reg, &unaligned);
129 __ subu(a3, zero_reg, a0); // In delay slot.
131 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
132 __ beq(a3, zero_reg, &aligned); // Already aligned.
133 __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
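// Example: for a0 == 0x1003, a3 == (0 - 0x1003) & 3 == 1, so a single byte
// is copied before dst and src become word-aligned.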
135 if (kArchEndian == kLittle) {
136 __ lwr(t8, MemOperand(a1));
138 __ swr(t8, MemOperand(a0));
141 __ lwl(t8, MemOperand(a1));
143 __ swl(t8, MemOperand(a0));
146 // Now dst/src are both aligned to word boundaries. Set a2 to
147 // count how many bytes we have to copy after all the 64 byte chunks are
148 // copied and a3 to the dst pointer after all the 64 byte chunks have been
149 // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
151 __ andi(t8, a2, 0x3f);
152 __ beq(a2, t8, &chkw); // Less than 64?
153 __ subu(a3, a2, t8); // In delay slot.
154 __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
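// Example: for a2 == 100, t8 == 100 & 0x3f == 36, so the loop copies
// 64 bytes and leaves 36 bytes for chkw/chk1w.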
156 // When we prefetch in the loop with the kPrefHintPrepareForStore hint,
157 // a0+x must not reach past the "t0-32" address. This means:
158 // for x=128 the last "safe" a0 address is "t0-160"; alternatively, for
159 // x=64 the last "safe" a0 address is "t0-96". The current version
160 // uses "pref hint, 128(a0)", so "t0-160" is the limit.
161 if (pref_hint_store == kPrefHintPrepareForStore) {
162 __ addu(t0, a0, a2); // t0 is the "past the end" address.
163 __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
166 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
167 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
168 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
169 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
171 if (pref_hint_store != kPrefHintPrepareForStore) {
172 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
173 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
174 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
177 __ lw(t0, MemOperand(a1));
179 if (pref_hint_store == kPrefHintPrepareForStore) {
180 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
181 __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
183 __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
185 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
186 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
189 __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
190 __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
191 __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
192 __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
193 __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
194 __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
195 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
197 __ sw(t0, MemOperand(a0));
198 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
199 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
200 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
201 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
202 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
203 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
204 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
206 __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
207 __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
208 __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
209 __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
210 __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
211 __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
212 __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
213 __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
214 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
216 __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
217 __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
218 __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
219 __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
220 __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
221 __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
222 __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
223 __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
224 __ addiu(a0, a0, 16 * loadstore_chunk);
225 __ bne(a0, a3, &loop16w);
226 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
229 // Here we have src and dst word-aligned but less than 64 bytes to go.
230 // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
231 // down to chk1w to handle the tail end of the copy.
233 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
234 __ andi(t8, a2, 0x1f);
235 __ beq(a2, t8, &chk1w); // Less than 32?
236 __ nop(); // In delay slot.
237 __ lw(t0, MemOperand(a1));
238 __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
239 __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
240 __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
241 __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
242 __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
243 __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
244 __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
245 __ addiu(a1, a1, 8 * loadstore_chunk);
246 __ sw(t0, MemOperand(a0));
247 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
248 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
249 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
250 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
251 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
252 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
253 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
254 __ addiu(a0, a0, 8 * loadstore_chunk);
256 // Here we have less than 32 bytes to copy. Set up for a loop to copy
257 // one word at a time. Set a2 to count how many bytes we have to copy
258 // after all the word chunks are copied and a3 to the dst pointer after
259 // all the word chunks have been copied. We will loop, incrementing a0
260 // and a1 until a0 equals a3.
262 __ andi(a2, t8, loadstore_chunk - 1);
263 __ beq(a2, t8, &lastb);
264 __ subu(a3, t8, a2); // In delay slot.
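// Example: with t8 == 7 bytes left, a2 == 3 tail bytes for lastb and the
// word loop below copies the remaining 4.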
267 __ bind(&wordCopy_loop);
268 __ lw(t3, MemOperand(a1));
269 __ addiu(a0, a0, loadstore_chunk);
270 __ addiu(a1, a1, loadstore_chunk);
271 __ bne(a0, a3, &wordCopy_loop);
272 __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
275 __ Branch(&leave, le, a2, Operand(zero_reg));
279 __ lb(v1, MemOperand(a1));
282 __ bne(a0, a3, &lastbloop);
283 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
289 // Unaligned case. Only the dst gets aligned so we need to do partial
290 // loads of the source followed by normal stores to the dst (once we
291 // have aligned the destination).
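// On little-endian MIPS an unaligned word at address A is read with the pair
//   lwr rt, 0(A)  // fills the low-order bytes
//   lwl rt, 3(A)  // fills the high-order bytes
// (the roles swap on big-endian), which is why each lwr below is paired with
// an lwl through MemOperand::offset_minus_one.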
293 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
294 __ beq(a3, zero_reg, &ua_chk16w);
295 __ subu(a2, a2, a3); // In delay slot.
297 if (kArchEndian == kLittle) {
298 __ lwr(v1, MemOperand(a1));
299 __ lwl(v1,
300 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
302 __ swr(v1, MemOperand(a0));
305 __ lwl(v1, MemOperand(a1));
306 __ lwr(v1,
307 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
309 __ swl(v1, MemOperand(a0));
313 // Now the dst (but not the source) is aligned. Set a2 to count how many
314 // bytes we have to copy after all the 64 byte chunks are copied and a3 to
315 // the dst pointer after all the 64 byte chunks have been copied. We will
316 // loop, incrementing a0 and a1 until a0 equals a3.
318 __ andi(t8, a2, 0x3f);
319 __ beq(a2, t8, &ua_chkw);
320 __ subu(a3, a2, t8); // In delay slot.
323 if (pref_hint_store == kPrefHintPrepareForStore) {
324 __ addu(t0, a0, a2); // t0 is the "past the end" address.
325 __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
328 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
329 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
330 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
332 if (pref_hint_store != kPrefHintPrepareForStore) {
333 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
334 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
335 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
338 __ bind(&ua_loop16w);
339 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
340 if (kArchEndian == kLittle) {
341 __ lwr(t0, MemOperand(a1));
342 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
343 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
345 if (pref_hint_store == kPrefHintPrepareForStore) {
346 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
347 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
349 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
351 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
352 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
354 __ bind(&ua_skip_pref);
355 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
356 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
357 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
358 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
359 __ lwl(t0,
360 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
361 __ lwl(t1,
362 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
363 __ lwl(t2,
364 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
365 __ lwl(t3,
366 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
367 __ lwl(t4,
368 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
369 __ lwl(t5,
370 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
371 __ lwl(t6,
372 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
373 __ lwl(t7,
374 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
376 __ lwl(t0, MemOperand(a1));
377 __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
378 __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
380 if (pref_hint_store == kPrefHintPrepareForStore) {
381 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
382 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
384 __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
386 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
387 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
389 __ bind(&ua_skip_pref);
390 __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
391 __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
392 __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
393 __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
394 __ lwr(t0,
395 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
396 __ lwr(t1,
397 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
398 __ lwr(t2,
399 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
400 __ lwr(t3,
401 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
402 __ lwr(t4,
403 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
404 __ lwr(t5,
405 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
406 __ lwr(t6,
407 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
408 __ lwr(t7,
409 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
411 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
412 __ sw(t0, MemOperand(a0));
413 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
414 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
415 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
416 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
417 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
418 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
419 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
420 if (kArchEndian == kLittle) {
421 __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
422 __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
423 __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
424 __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
425 __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
426 __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
427 __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
428 __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
429 __ lwl(t0,
430 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
431 __ lwl(t1,
432 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
433 __ lwl(t2,
434 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
435 __ lwl(t3,
436 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
437 __ lwl(t4,
438 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
439 __ lwl(t5,
440 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
441 __ lwl(t6,
442 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
443 __ lwl(t7,
444 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
446 __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
447 __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
448 __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
449 __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
450 __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
451 __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
452 __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
453 __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
454 __ lwr(t0,
455 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
456 __ lwr(t1,
457 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
458 __ lwr(t2,
459 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
460 __ lwr(t3,
461 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
462 __ lwr(t4,
463 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
464 __ lwr(t5,
465 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
466 __ lwr(t6,
467 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
468 __ lwr(t7,
469 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
471 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
472 __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
473 __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
474 __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
475 __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
476 __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
477 __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
478 __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
479 __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
480 __ addiu(a0, a0, 16 * loadstore_chunk);
481 __ bne(a0, a3, &ua_loop16w);
482 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
485 // Here we have less than 64 bytes to copy. Check for
486 // a 32-byte chunk and copy it if there is one. Otherwise jump down to
487 // ua_chk1w to handle the tail end of the copy.
489 __ Pref(pref_hint_load, MemOperand(a1));
490 __ andi(t8, a2, 0x1f);
492 __ beq(a2, t8, &ua_chk1w);
493 __ nop(); // In delay slot.
494 if (kArchEndian == kLittle) {
495 __ lwr(t0, MemOperand(a1));
496 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
497 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
498 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
499 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
500 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
501 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
502 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
503 __ lwl(t0,
504 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
505 __ lwl(t1,
506 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
507 __ lwl(t2,
508 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
509 __ lwl(t3,
510 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
511 __ lwl(t4,
512 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
513 __ lwl(t5,
514 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
515 __ lwl(t6,
516 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
517 __ lwl(t7,
518 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
520 __ lwl(t0, MemOperand(a1));
521 __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
522 __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
523 __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
524 __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
525 __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
526 __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
527 __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
528 __ lwr(t0,
529 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
530 __ lwr(t1,
531 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
532 __ lwr(t2,
533 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
534 __ lwr(t3,
535 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
536 __ lwr(t4,
537 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
538 __ lwr(t5,
539 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
540 __ lwr(t6,
541 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
542 __ lwr(t7,
543 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
545 __ addiu(a1, a1, 8 * loadstore_chunk);
546 __ sw(t0, MemOperand(a0));
547 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
548 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
549 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
550 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
551 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
552 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
553 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
554 __ addiu(a0, a0, 8 * loadstore_chunk);
556 // Less than 32 bytes to copy. Set up for a loop to
557 // copy one word at a time.
559 __ andi(a2, t8, loadstore_chunk - 1);
560 __ beq(a2, t8, &ua_smallCopy);
561 __ subu(a3, t8, a2); // In delay slot.
564 __ bind(&ua_wordCopy_loop);
565 if (kArchEndian == kLittle) {
566 __ lwr(v1, MemOperand(a1));
567 __ lwl(v1,
568 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
570 __ lwl(v1, MemOperand(a1));
571 __ lwr(v1,
572 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
574 __ addiu(a0, a0, loadstore_chunk);
575 __ addiu(a1, a1, loadstore_chunk);
576 __ bne(a0, a3, &ua_wordCopy_loop);
577 __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
579 // Copy the last 8 bytes.
580 __ bind(&ua_smallCopy);
581 __ beq(a2, zero_reg, &leave);
582 __ addu(a3, a0, a2); // In delay slot.
584 __ bind(&ua_smallCopy_loop);
585 __ lb(v1, MemOperand(a1));
588 __ bne(a0, a3, &ua_smallCopy_loop);
589 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
596 ASSERT(!RelocInfo::RequiresRelocation(desc));
598 CPU::FlushICache(buffer, actual_size);
599 OS::ProtectCode(buffer, actual_size);
600 return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
605 UnaryMathFunction CreateSqrtFunction() {
606 #if defined(USE_SIMULATOR)
607 return &std::sqrt;
608 #else
609 size_t actual_size;
610 byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
611 if (buffer == NULL) return &std::sqrt;
613 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
615 __ MovFromFloatParameter(f12);
616 __ sqrt_d(f0, f12);
617 __ MovToFloatResult(f0);
622 ASSERT(!RelocInfo::RequiresRelocation(desc));
624 CPU::FlushICache(buffer, actual_size);
625 OS::ProtectCode(buffer, actual_size);
626 return FUNCTION_CAST<UnaryMathFunction>(buffer);
633 // -------------------------------------------------------------------------
634 // Platform-specific RuntimeCallHelper functions.
636 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
637 masm->EnterFrame(StackFrame::INTERNAL);
638 ASSERT(!masm->has_frame());
639 masm->set_has_frame(true);
643 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
644 masm->LeaveFrame(StackFrame::INTERNAL);
645 ASSERT(masm->has_frame());
646 masm->set_has_frame(false);
650 // -------------------------------------------------------------------------
653 #define __ ACCESS_MASM(masm)
655 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
656 MacroAssembler* masm, AllocationSiteMode mode,
657 Label* allocation_memento_found) {
658 // ----------- S t a t e -------------
662 // -- ra : return address
663 // -- a3 : target map, scratch for subsequent call
664 // -- t0 : scratch (elements)
665 // -----------------------------------
666 if (mode == TRACK_ALLOCATION_SITE) {
667 ASSERT(allocation_memento_found != NULL);
668 __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
671 // Set transitioned map.
672 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
673 __ RecordWriteField(a2,
674 HeapObject::kMapOffset,
684 void ElementsTransitionGenerator::GenerateSmiToDouble(
685 MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
686 // ----------- S t a t e -------------
690 // -- ra : return address
691 // -- a3 : target map, scratch for subsequent call
692 // -- t0 : scratch (elements)
693 // -----------------------------------
694 Label loop, entry, convert_hole, gc_required, only_change_map, done;
696 Register scratch = t6;
698 if (mode == TRACK_ALLOCATION_SITE) {
699 __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
702 // Check for empty arrays, which only require a map transition and no changes
703 // to the backing store.
704 __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
705 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
706 __ Branch(&only_change_map, eq, at, Operand(t0));
709 __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
710 // t0: source FixedArray
711 // t1: number of elements (smi-tagged)
713 // Allocate new FixedDoubleArray.
714 __ sll(scratch, t1, 2);
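// t1 is a smi (value << 1), so t1 << 2 equals length * 8, i.e.
// length * kDoubleSize.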
715 __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
716 __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
717 // t2: destination FixedDoubleArray, not tagged as heap object
719 // Set destination FixedDoubleArray's length and map.
720 __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
721 __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
722 __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
723 // Update receiver's map.
725 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
726 __ RecordWriteField(a2,
727 HeapObject::kMapOffset,
734 // Replace receiver's backing store with newly created FixedDoubleArray.
735 __ Addu(a3, t2, Operand(kHeapObjectTag));
736 __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
737 __ RecordWriteField(a2,
738 JSObject::kElementsOffset,
747 // Prepare for conversion loop.
748 __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
749 __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
752 __ li(t0, Operand(kHoleNanLower32));
753 __ li(t1, Operand(kHoleNanUpper32));
754 // t0: kHoleNanLower32
755 // t1: kHoleNanUpper32
756 // t2: end of destination FixedDoubleArray, not tagged
757 // t3: begin of FixedDoubleArray element fields, not tagged
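// The hole is a NaN with the fixed bit pattern kHoleNanUpper32:kHoleNanLower32,
// recognizable later from its upper word alone (see GenerateDoubleToObject).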
761 __ bind(&only_change_map);
762 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
763 __ RecordWriteField(a2,
764 HeapObject::kMapOffset,
773 // Call into runtime if GC is required.
774 __ bind(&gc_required);
778 // Convert and copy elements.
780 __ lw(t5, MemOperand(a3));
781 __ Addu(a3, a3, kIntSize);
782 // t5: current element
783 __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
785 // Normal smi, convert to double and store.
788 __ sdc1(f0, MemOperand(t3));
789 __ Addu(t3, t3, kDoubleSize);
793 // Hole found, store the-hole NaN.
794 __ bind(&convert_hole);
795 if (FLAG_debug_code) {
796 // Restore a "smi-untagged" heap object.
798 __ Or(t5, t5, Operand(1));
799 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
800 __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
802 __ sw(t0, MemOperand(t3, Register::kMantissaOffset)); // mantissa
803 __ sw(t1, MemOperand(t3, Register::kExponentOffset)); // exponent
804 __ Addu(t3, t3, kDoubleSize);
807 __ Branch(&loop, lt, t3, Operand(t2));
814 void ElementsTransitionGenerator::GenerateDoubleToObject(
815 MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
816 // ----------- S t a t e -------------
820 // -- ra : return address
821 // -- a3 : target map, scratch for subsequent call
822 // -- t0 : scratch (elements)
823 // -----------------------------------
824 Label entry, loop, convert_hole, gc_required, only_change_map;
826 if (mode == TRACK_ALLOCATION_SITE) {
827 __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
830 // Check for empty arrays, which only require a map transition and no changes
831 // to the backing store.
832 __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
833 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
834 __ Branch(&only_change_map, eq, at, Operand(t0));
836 __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
838 __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
839 // t0: source FixedArray
840 // t1: number of elements (smi-tagged)
842 // Allocate new FixedArray.
844 __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
845 __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
846 // t2: destination FixedArray, not tagged as heap object
847 // Set destination FixedArray's length and map.
848 __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
849 __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
850 __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
852 // Prepare for conversion loop.
853 __ Addu(t0, t0, Operand(
854 FixedDoubleArray::kHeaderSize - kHeapObjectTag
855 + Register::kExponentOffset));
856 __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
857 __ Addu(t2, t2, Operand(kHeapObjectTag));
860 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
861 __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
862 // Using offset addresses.
863 // a3: begin of destination FixedArray element fields, not tagged
864 // t0: begin of source FixedDoubleArray element fields, not tagged,
865 // points to the exponent
866 // t1: end of destination FixedArray, not tagged
867 // t2: destination FixedArray
868 // t3: the-hole pointer
869 // t5: heap number map
872 // Call into runtime if GC is required.
873 __ bind(&gc_required);
874 __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
879 __ lw(a1, MemOperand(t0));
880 __ Addu(t0, t0, kDoubleSize);
881 // a1: current element's upper 32 bits
882 // t0: address of next element's upper 32 bits
883 __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
885 // Non-hole double, copy value into a heap number.
886 __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
887 // a2: new heap number
888 // Load the mantissa of the current element; t0 points to the next element's exponent.
889 __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
890 - Register::kExponentOffset - kDoubleSize)));
891 __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
892 __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
894 __ sw(a2, MemOperand(a3));
895 __ Addu(a3, a3, kIntSize);
905 // Replace the-hole NaN with the-hole pointer.
906 __ bind(&convert_hole);
907 __ sw(t3, MemOperand(a3));
908 __ Addu(a3, a3, kIntSize);
911 __ Branch(&loop, lt, a3, Operand(t1));
913 __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
914 // Replace receiver's backing store with newly created and filled FixedArray.
915 __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
916 __ RecordWriteField(a2,
917 JSObject::kElementsOffset,
926 __ bind(&only_change_map);
927 // Update receiver's map.
928 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
929 __ RecordWriteField(a2,
930 HeapObject::kMapOffset,
940 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
944 Label* call_runtime) {
945 // Fetch the instance type of the receiver into result register.
946 __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
947 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
949 // We need special handling for indirect strings.
950 Label check_sequential;
951 __ And(at, result, Operand(kIsIndirectStringMask));
952 __ Branch(&check_sequential, eq, at, Operand(zero_reg));
954 // Dispatch on the indirect string shape: slice or cons.
956 __ And(at, result, Operand(kSlicedNotConsMask));
957 __ Branch(&cons_string, eq, at, Operand(zero_reg));
960 Label indirect_string_loaded;
961 __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
962 __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
963 __ sra(at, result, kSmiTagSize);
964 __ Addu(index, index, at);
965 __ jmp(&indirect_string_loaded);
967 // Handle cons strings.
968 // Check whether the right hand side is the empty string (i.e. if
969 // this is really a flat string in a cons string). If that is not
970 // the case, we would rather go to the runtime system now to flatten
971 // the string.
972 __ bind(&cons_string);
973 __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
974 __ LoadRoot(at, Heap::kempty_stringRootIndex);
975 __ Branch(call_runtime, ne, result, Operand(at));
976 // Get the first of the two strings and load its instance type.
977 __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
979 __ bind(&indirect_string_loaded);
980 __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
981 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
983 // Distinguish sequential and external strings. Only these two string
984 // representations can reach here (slices and flat cons strings have been
985 // reduced to the underlying sequential or external string).
986 Label external_string, check_encoding;
987 __ bind(&check_sequential);
988 STATIC_ASSERT(kSeqStringTag == 0);
989 __ And(at, result, Operand(kStringRepresentationMask));
990 __ Branch(&external_string, ne, at, Operand(zero_reg));
992 // Prepare sequential strings.
993 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
994 __ Addu(string,
995 string,
996 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
997 __ jmp(&check_encoding);
999 // Handle external strings.
1000 __ bind(&external_string);
1001 if (FLAG_debug_code) {
1002 // Assert that we do not have a cons or slice (indirect strings) here.
1003 // Sequential strings have already been ruled out.
1004 __ And(at, result, Operand(kIsIndirectStringMask));
1005 __ Assert(eq, kExternalStringExpectedButNotFound,
1006 at, Operand(zero_reg));
1008 // Rule out short external strings.
1009 STATIC_CHECK(kShortExternalStringTag != 0);
1010 __ And(at, result, Operand(kShortExternalStringMask));
1011 __ Branch(call_runtime, ne, at, Operand(zero_reg));
1012 __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
1015 __ bind(&check_encoding);
1016 STATIC_ASSERT(kTwoByteStringTag == 0);
1017 __ And(at, result, Operand(kStringEncodingMask));
1018 __ Branch(&ascii, ne, at, Operand(zero_reg));
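// Two-byte string: each character occupies two bytes, hence index << 1.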
1020 __ sll(at, index, 1);
1021 __ Addu(at, string, at);
1022 __ lhu(result, MemOperand(at));
1026 __ Addu(at, string, index);
1027 __ lbu(result, MemOperand(at));
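// Helper addressing the i-th entry of the contiguous array of doubles at
// ExternalReference::math_exp_constants.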
1032 static MemOperand ExpConstant(int index, Register base) {
1033 return MemOperand(base, index * kDoubleSize);
1037 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
1038 DoubleRegister input,
1039 DoubleRegister result,
1040 DoubleRegister double_scratch1,
1041 DoubleRegister double_scratch2,
1045 ASSERT(!input.is(result));
1046 ASSERT(!input.is(double_scratch1));
1047 ASSERT(!input.is(double_scratch2));
1048 ASSERT(!result.is(double_scratch1));
1049 ASSERT(!result.is(double_scratch2));
1050 ASSERT(!double_scratch1.is(double_scratch2));
1051 ASSERT(!temp1.is(temp2));
1052 ASSERT(!temp1.is(temp3));
1053 ASSERT(!temp2.is(temp3));
1054 ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
1056 Label zero, infinity, done;
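// In outline: inputs below constant 0 underflow to 0, inputs above
// constant 1 overflow to +Infinity (constant 2); otherwise the result is a
// polynomial correction multiplied by a math_exp_log_table entry, with the
// power-of-two exponent assembled directly into the double's high word.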
1058 __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
1060 __ ldc1(double_scratch1, ExpConstant(0, temp3));
1061 __ BranchF(&zero, NULL, ge, double_scratch1, input);
1063 __ ldc1(double_scratch2, ExpConstant(1, temp3));
1064 __ BranchF(&infinity, NULL, ge, input, double_scratch2);
1066 __ ldc1(double_scratch1, ExpConstant(3, temp3));
1067 __ ldc1(result, ExpConstant(4, temp3));
1068 __ mul_d(double_scratch1, double_scratch1, input);
1069 __ add_d(double_scratch1, double_scratch1, result);
1070 __ FmoveLow(temp2, double_scratch1);
1071 __ sub_d(double_scratch1, double_scratch1, result);
1072 __ ldc1(result, ExpConstant(6, temp3));
1073 __ ldc1(double_scratch2, ExpConstant(5, temp3));
1074 __ mul_d(double_scratch1, double_scratch1, double_scratch2);
1075 __ sub_d(double_scratch1, double_scratch1, input);
1076 __ sub_d(result, result, double_scratch1);
1077 __ mul_d(double_scratch2, double_scratch1, double_scratch1);
1078 __ mul_d(result, result, double_scratch2);
1079 __ ldc1(double_scratch2, ExpConstant(7, temp3));
1080 __ mul_d(result, result, double_scratch2);
1081 __ sub_d(result, result, double_scratch1);
1082 // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
1083 ASSERT(*reinterpret_cast<double*>
1084 (ExternalReference::math_exp_constants(8).address()) == 1);
1085 __ Move(double_scratch2, 1);
1086 __ add_d(result, result, double_scratch2);
1087 __ srl(temp1, temp2, 11);
1088 __ Ext(temp2, temp2, 0, 11);
1089 __ Addu(temp1, temp1, Operand(0x3ff));
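// 0x3ff (1023) is the IEEE 754 double exponent bias.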
1091 // Must not call ExpConstant() after overwriting temp3!
1092 __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1093 __ sll(at, temp2, 3);
1094 __ Addu(temp3, temp3, Operand(at));
1095 __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
1096 __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
1097 // The first word loaded is in the lower-numbered register.
1098 if (temp2.code() < temp3.code()) {
1099 __ sll(at, temp1, 20);
1100 __ Or(temp1, temp3, at);
1101 __ Move(double_scratch1, temp2, temp1);
1103 __ sll(at, temp1, 20);
1104 __ Or(temp1, temp2, at);
1105 __ Move(double_scratch1, temp3, temp1);
1107 __ mul_d(result, result, double_scratch1);
1108 __ BranchShort(&done);
1111 __ Move(result, kDoubleRegZero);
1112 __ BranchShort(&done);
1115 __ ldc1(result, ExpConstant(2, temp3));
1121 // nop(CODE_AGE_MARKER_NOP)
1122 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
1125 static byte* GetNoCodeAgeSequence(uint32_t* length) {
1126 // The sequence of instructions that is patched out for aging code is the
1127 // following boilerplate stack-building prologue that is found in FUNCTIONS.
1128 static bool initialized = false;
1129 static uint32_t sequence[kNoCodeAgeSequenceLength];
1130 byte* byte_sequence = reinterpret_cast<byte*>(sequence);
1131 *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
1133 // Since patcher is a large object, allocate it dynamically when needed,
1134 // to avoid overloading the stack in stress conditions.
1135 SmartPointer<CodePatcher>
1136 patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
1137 PredictableCodeSizeScope scope(patcher->masm(), *length);
1138 patcher->masm()->Push(ra, fp, cp, a1);
1139 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1140 patcher->masm()->Addu(
1141 fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1144 return byte_sequence;
1148 bool Code::IsYoungSequence(byte* sequence) {
1149 uint32_t young_length;
1150 byte* young_sequence = GetNoCodeAgeSequence(&young_length);
1151 bool result = !memcmp(sequence, young_sequence, young_length);
1152 ASSERT(result ||
1153 Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
1158 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
1159 MarkingParity* parity) {
1160 if (IsYoungSequence(sequence)) {
1161 *age = kNoAgeCodeAge;
1162 *parity = NO_MARKING_PARITY;
1164 Address target_address = Assembler::target_address_at(
1165 sequence + Assembler::kInstrSize);
1166 Code* stub = GetCodeFromTargetAddress(target_address);
1167 GetCodeAgeAndParity(stub, age, parity);
1172 void Code::PatchPlatformCodeAge(Isolate* isolate,
1175 MarkingParity parity) {
1176 uint32_t young_length;
1177 byte* young_sequence = GetNoCodeAgeSequence(&young_length);
1178 if (age == kNoAgeCodeAge) {
1179 CopyBytes(sequence, young_sequence, young_length);
1180 CPU::FlushICache(sequence, young_length);
1182 Code* stub = GetCodeAgeStub(isolate, age, parity);
1183 CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
1184 // Mark this code sequence for FindPlatformCodeAgeSequence().
1185 patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
1186 // Load the stub address to t9 and call it;
1187 // GetCodeAgeAndParity() extracts the stub address from this instruction.
1188 patcher.masm()->li(
1189 t9,
1190 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
1191 CONSTANT_SIZE);
1192 patcher.masm()->nop(); // Prevent jalr to jal optimization.
1193 patcher.masm()->jalr(t9, a0);
1194 patcher.masm()->nop(); // Branch delay slot nop.
1195 patcher.masm()->nop(); // Pad the empty space.
1202 } } // namespace v8::internal
1204 #endif // V8_TARGET_ARCH_MIPS