#include "allocation.h"
#include <stdlib.h> // For free, malloc.
-#include <string.h> // For memcpy.
#include "checks.h"
+#include "platform.h"
#include "utils.h"
namespace v8 {
+// Returns a freshly NewArray-allocated, NUL-terminated copy of |str|.
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
+ OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}
int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
+ OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}
}
// Copy the data to align it.
unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- i::OS::MemCopy(deserialized_data, data, length);
+ i::CopyBytes(reinterpret_cast<char*>(deserialized_data),
+ data, static_cast<size_t>(length));
return new i::ScriptDataImpl(
i::Vector<unsigned>(deserialized_data, deserialized_data_length));
// Write prefix.
char* ptr = buf.start();
- memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
+ i::OS::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
ptr += prefix_len;
// Write real content.
ptr += str_len;
// Write postfix.
- memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
+ i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
Local<String> result = v8::String::New(buf.start(), buf_len);
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
handle_scope_data_ = *current;
- memcpy(storage, this, sizeof(*this));
+ OS::MemCopy(storage, this, sizeof(*this));
ResetAfterArchive();
current->Initialize();
+// Rebuilds this HandleScopeImplementer from the byte snapshot previously
+// written to |storage|, then re-publishes the saved handle scope data to
+// the isolate. Returns the cursor advanced past the consumed bytes.
char* HandleScopeImplementer::RestoreThread(char* storage) {
- memcpy(this, storage, sizeof(*this));
+ OS::MemCopy(this, storage, sizeof(*this));
*isolate_->handle_scope_data() = handle_scope_data_;
return storage + ArchiveSpacePerThread();
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- memcpy(&i, &d, 8);
+ OS::MemCopy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
const double double_data = rinfo.data64();
uint64_t uint_data = 0;
- memcpy(&uint_data, &double_data, sizeof(double_data));
+ OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
emit(uint_data & 0xFFFFFFFF);
emit(uint_data >> 32);
}
uint64_t imm = high16 << 48;
double d;
- memcpy(&d, &imm, 8);
+ OS::MemCopy(&d, &imm, 8);
return d;
}
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
+ OS::MemCopy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_registers_[0])];
- memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ OS::MemCopy(buffer, ®isters_[reg], 2 * sizeof(registers_[0]));
+ OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
- memcpy(&vfp_registers_[reg_index * register_size], buffer,
- register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
}
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &vfp_registers_[register_size * reg_index],
- register_size * sizeof(vfp_registers_[0]));
- memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
+ OS::MemCopy(buffer, registers_, sizeof(*x));
+ OS::MemCopy(x, buffer, sizeof(*x));
// Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
+ OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
+ OS::MemCopy(y, buffer, sizeof(*y));
// Register 2 -> z.
- memcpy(buffer, registers_ + 2, sizeof(*z));
- memcpy(z, buffer, sizeof(*z));
+ OS::MemCopy(buffer, registers_ + 2, sizeof(*z));
+ OS::MemCopy(z, buffer, sizeof(*z));
}
}
+// Stores a double return value where the simulated ARM calling convention
+// expects it: the d0 VFP register under EABI hardfloat, otherwise the
+// r0/r1 core register pair.
+// NOTE(review): the intermediate char buffer appears to be a strict-aliasing
+// safe staging area between double and the register arrays — confirm.
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to d0.
- memcpy(vfp_registers_, buffer, sizeof(buffer));
+ OS::MemCopy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1.
- memcpy(registers_, buffer, sizeof(buffer));
+ OS::MemCopy(registers_, buffer, sizeof(buffer));
}
}
ReadW(reinterpret_cast<int32_t>(address + 1), instr)
};
double d;
- memcpy(&d, data, 8);
+ OS::MemCopy(&d, data, 8);
set_d_register_from_double(reg, d);
} else {
int32_t data[2];
double d = get_double_from_d_register(reg);
- memcpy(data, &d, 8);
+ OS::MemCopy(data, &d, 8);
WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dd_value = get_double_from_d_register(vd);
int32_t data[2];
- memcpy(data, &dd_value, 8);
+ OS::MemCopy(data, &dd_value, 8);
data[instr->Bit(21)] = get_register(instr->RtValue());
- memcpy(&dd_value, data, 8);
+ OS::MemCopy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
if (instr->HasL()) {
int32_t data[2];
double d = get_double_from_d_register(vm);
- memcpy(data, &d, 8);
+ OS::MemCopy(data, &d, 8);
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
int32_t data[] = { get_register(rt), get_register(rn) };
double d;
- memcpy(&d, data, 8);
+ OS::MemCopy(&d, data, 8);
set_d_register_from_double(vm, d);
}
}
ReadW(address + 4, instr)
};
double val;
- memcpy(&val, data, 8);
+ OS::MemCopy(&val, data, 8);
set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
int32_t data[2];
double val = get_double_from_d_register(vd);
- memcpy(data, &val, 8);
+ OS::MemCopy(data, &val, 8);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
}
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
+ OS::MemCopy(buffer, &d0, sizeof(d0));
set_dw_register(0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
+ OS::MemCopy(buffer, &d1, sizeof(d1));
set_dw_register(2, buffer);
}
CallInternal(entry);
#include <string.h>
#include "atomicops.h"
+#include "platform.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
- memcpy(vendor, &ebx, 4);
- memcpy(vendor + 4, &edx, 4);
- memcpy(vendor + 8, &ecx, 4);
+ v8::internal::OS::MemCopy(vendor, &ebx, 4);
+ v8::internal::OS::MemCopy(vendor + 4, &edx, 4);
+ v8::internal::OS::MemCopy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
int src_index,
int len) {
if (len == 0) return;
- memmove(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kDoubleSize);
+ OS::MemMove(dst->data_start() + dst_index,
+ src->data_start() + src_index,
+ len * kDoubleSize);
}
uint32_t result;
Address mantissa_ptr = reinterpret_cast<Address>(&x);
// Copy least significant 32 bits of mantissa.
- memcpy(&result, mantissa_ptr, sizeof(result));
+ OS::MemCopy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
}
// Large number (outside uint32 range), Infinity or NaN.
char* Debug::ArchiveDebug(char* storage) {
char* to = storage;
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
to += sizeof(ThreadLocal);
- memcpy(to, reinterpret_cast<char*>(®isters_), sizeof(registers_));
+ OS::MemCopy(to, reinterpret_cast<char*>(®isters_), sizeof(registers_));
ThreadInit();
ASSERT(to <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
+// Restores per-thread debugger state (thread_local_ and registers_) from
+// the archived snapshot in |storage|; returns the cursor advanced by
+// ArchiveSpacePerThread().
char* Debug::RestoreDebug(char* storage) {
char* from = storage;
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ OS::MemCopy(
+ reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
from += sizeof(ThreadLocal);
+// NOTE(review): "®isters_" below is mojibake for "&registers_" ("&reg" was
+// decoded as the HTML entity for (R)) — fix the encoding in the real patch.
- memcpy(reinterpret_cast<char*>(®isters_), from, sizeof(registers_));
+ OS::MemCopy(reinterpret_cast<char*>(®isters_), from, sizeof(registers_));
ASSERT(from <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
}
+// Flattens the accumulated translation bytes in |contents_| into a newly
+// allocated, tenured ByteArray and returns it.
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
int length = contents_.length();
Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
- memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
+ OS::MemCopy(
+ result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
- copy_size);
+ static_cast<size_t>(copy_size));
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
int words_per_double = (kDoubleSize / kPointerSize);
CopyWords(reinterpret_cast<Object**>(to_address),
reinterpret_cast<Object**>(from_address),
- words_per_double * copy_size);
+ static_cast<size_t>(words_per_double * copy_size));
}
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadLocal blank;
// Set the stack limits using the old thread_local_.
+// Restores the stack guard's per-thread state from |from| (under the
+// execution access lock) and pushes the restored limits into the heap.
+// Returns the cursor advanced past the consumed ThreadLocal bytes.
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ OS::MemCopy(
+ reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
// make a copy so we can NUL-terminate flag name
size_t n = arg - *name;
CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
- memcpy(buffer, *name, n);
+ OS::MemCopy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
// get the value
int FlagList::SetFlagsFromString(const char* str, int len) {
// make a 0-terminated copy of str
ScopedVector<char> copy0(len + 1);
- memcpy(copy0.start(), str, len);
+ OS::MemCopy(copy0.start(), str, len);
copy0[len] = '\0';
// strip leading white space
#else
#error Unsupported target architecture.
#endif
- memcpy(header->ident, ident, 16);
+ OS::MemCopy(header->ident, ident, 16);
header->type = 1;
#if defined(V8_TARGET_ARCH_IA32)
header->machine = 3;
entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
entry->symfile_size_ = symfile_size;
- memcpy(entry->symfile_addr_, symfile_addr, symfile_size);
+ OS::MemCopy(entry->symfile_addr_, symfile_addr, symfile_size);
entry->prev_ = entry->next_ = NULL;
malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
group->parent_ = parent;
group->length_ = length;
- CopyWords(group->children_, children, static_cast<int>(length));
+ CopyWords(group->children_, children, length);
return group;
}
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqOneByteString::kHeaderSize,
- str.start(), str.length());
+ OS::MemCopy(answer->address() + SeqOneByteString::kHeaderSize,
+ str.start(), str.length());
return answer;
}
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
- str.start(), str.length() * kUC16Size);
+ OS::MemCopy(answer->address() + SeqTwoByteString::kHeaderSize,
+ str.start(), str.length() * kUC16Size);
return answer;
}
+// Copies |byte_size| bytes from |src| to |dst| word-by-word via CopyWords.
+// NOTE(review): the division truncates, so this presumably requires
+// byte_size to be a multiple of kPointerSize — confirm callers guarantee it.
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
- byte_size / kPointerSize);
+ static_cast<size_t>(byte_size / kPointerSize));
}
*dst_slot++ = *src_slot++;
}
} else {
- memmove(dst, src, byte_size);
+ OS::MemMove(dst, src, static_cast<size_t>(byte_size));
}
}
int s_chunk_size = Min(
chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
ASSERT(s_chunk_size > 0);
- memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+ OS::MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
s += s_chunk_size;
chunk_pos_ += s_chunk_size;
MaybeWriteChunk();
ASSERT(array->map() != HEAP->fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
- memmove(dst_objects,
- array->data_start() + src_index,
- len * kPointerSize);
+ OS::MemMove(dst_objects,
+ array->data_start() + src_index,
+ len * kPointerSize);
if (!InNewSpace(array)) {
for (int i = 0; i < len; i++) {
// TODO(hpayer): check store buffer for entries
int len) {
// Only works for ascii.
ASSERT(vector.length() == len);
- memcpy(chars, vector.start(), len);
+ OS::MemCopy(chars, vector.start(), len);
}
static inline void WriteTwoByteData(Vector<const char> vector,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
- memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
- memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+ OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+ OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
}
array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
free_list_head_(other->free_list_head_) {
- memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
- memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
}
lists_ = new_lists;
if (old_lists != NULL) {
- memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
if (this != &other) {
- memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
}
return *this;
}
size_t string_len = strlen(underlying_buffer) + 1;
ASSERT(string_len <= sizeof(underlying_buffer));
char* result = new char[strlen(underlying_buffer) + 1];
- memcpy(result, underlying_buffer, string_len);
+ OS::MemCopy(result, underlying_buffer, string_len);
return SmartArrayPointer<char>(result);
}
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
if (isolate()->assembler_spare_buffer() == NULL &&
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
void movdqu(const Operand& dst, XMMRegister src);
+ // Emit an SSE2 128-bit load: movdqa when |aligned| promises a 16-byte
+ // aligned source, movdqu otherwise. Lets callers pick the form with a flag.
+ void movdq(bool aligned, XMMRegister dst, const Operand& src) {
+ if (aligned) {
+ movdqa(dst, src);
+ } else {
+ movdqu(dst, src);
+ }
+ }
// Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src);
}
-static void MemCopyWrapper(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
+// Helper functions for CreateMemMoveFunction.
+#undef __
+#define __ ACCESS_MASM(masm)
+
+// Keep around global pointers to these objects so that Valgrind won't complain.
+static size_t* medium_handlers = NULL;
+static size_t* small_handlers = NULL;
+
+
+enum Direction { FORWARD, BACKWARD };
+enum Alignment { ALIGNED, UNALIGNED };
+
+// Expects registers:
+// esi - source, aligned if alignment == ALIGNED
+// edi - destination, always aligned
+// ecx - count (copy size in bytes)
+// edx - loop count (number of 64 byte chunks)
+// Emits the main 64-byte SSE2 copy loop plus 32- and 16-byte tail steps,
+// then jumps to |move_last_15| with at most 15 bytes left (count & 0xF).
+// FORWARD copies post-increment src/dst; BACKWARD copies pre-decrement them
+// (callers point src/dst at the end of the region for BACKWARD).
+// Loads honor |alignment| via movdq; stores use movdqa because the
+// destination is always 16-byte aligned here. Clobbers xmm0-xmm3 and edx.
+void MemMoveEmitMainLoop(MacroAssembler* masm,
+ Label* move_last_15,
+ Direction direction,
+ Alignment alignment) {
+ Register src = esi;
+ Register dst = edi;
+ Register count = ecx;
+ Register loop_count = edx;
+ Label loop, move_last_31, move_last_63;
+ // Skip straight to the tail handlers when no full 64-byte chunk remains.
+ __ cmp(loop_count, 0);
+ __ j(equal, &move_last_63);
+ __ bind(&loop);
+ // Main loop. Copy in 64 byte chunks.
+ if (direction == BACKWARD) __ sub(src, Immediate(0x40));
+ __ movdq(alignment == ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == ALIGNED, xmm1, Operand(src, 0x10));
+ __ movdq(alignment == ALIGNED, xmm2, Operand(src, 0x20));
+ __ movdq(alignment == ALIGNED, xmm3, Operand(src, 0x30));
+ if (direction == FORWARD) __ add(src, Immediate(0x40));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ movdqa(Operand(dst, 0x20), xmm2);
+ __ movdqa(Operand(dst, 0x30), xmm3);
+ if (direction == FORWARD) __ add(dst, Immediate(0x40));
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ // At most 63 bytes left to copy.
+ __ bind(&move_last_63);
+ // Test individual size bits of |count| to peel one 32-byte step...
+ __ test(count, Immediate(0x20));
+ __ j(zero, &move_last_31);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x20));
+ __ movdq(alignment == ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == ALIGNED, xmm1, Operand(src, 0x10));
+ if (direction == FORWARD) __ add(src, Immediate(0x20));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ if (direction == FORWARD) __ add(dst, Immediate(0x20));
+ // At most 31 bytes left to copy.
+ __ bind(&move_last_31);
+ // ...and one 16-byte step, before handing off the sub-16-byte remainder.
+ __ test(count, Immediate(0x10));
+ __ j(zero, move_last_15);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x10));
+ __ movdq(alignment == ALIGNED, xmm0, Operand(src, 0));
+ if (direction == FORWARD) __ add(src, Immediate(0x10));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ if (direction == FORWARD) __ add(dst, Immediate(0x10));
+}
+
+
+// Emits the common epilogue for every MemMove exit path: restore the
+// callee-saved esi/edi pushed in the prologue, then return.
+void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
}
-OS::MemCopyFunction CreateMemCopyFunction() {
+#undef __
+#define __ masm.
+
+
+OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &MemCopyWrapper;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // Generated code is put into a fixed, unmovable, buffer, and not into
+ // Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
+ // When copying up to this many bytes, use special "small" handlers.
+ const size_t kSmallCopySize = 8;
+ // When copying up to this many bytes, use special "medium" handlers.
+ const size_t kMediumCopySize = 63;
+ // When non-overlapping region of src and dst is less than this,
+ // use a more careful implementation (slightly slower).
+ const size_t kMinMoveDistance = 16;
+ // Note that these values are dictated by the implementation below,
+ // do not just change them and hope things will work!
+
int stack_offset = 0; // Update if we change the stack height.
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(OS::kMinComplexMemCopy));
- Label ok;
- __ j(greater_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
+ Label backward, backward_much_overlap;
+ Label forward_much_overlap, small_size, medium_size, pop_and_return;
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ Register loop_count = edx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ __ cmp(dst, src);
+ __ j(equal, &pop_and_return);
+
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope enable(&masm, SSE2);
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
-
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
-
- // edi is now aligned. Check if esi is also aligned.
- Label unaligned_source;
- __ test(src, Immediate(0x0F));
- __ j(not_zero, &unaligned_source);
+ CpuFeatureScope sse2_scope(&masm, SSE2);
+ __ prefetch(Operand(src, 0), 1);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ cmp(count, kMediumCopySize);
+ __ j(below_equal, &medium_size);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+
{
+ // |dst| is a lower address than |src|. Copy front-to-back.
+ Label unaligned_source, move_last_15, skip_last_move;
+ __ mov(eax, src);
+ __ sub(eax, dst);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &forward_much_overlap);
+ // Copy first 16 bytes.
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ // Determine distance to alignment: 16 - (dst & 0xF).
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(edx, Immediate(16));
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
- __ mov(edx, count);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop.
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqa(xmm0, Operand(src, 0x00));
- __ movdqa(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqa(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
+ __ bind(&move_last_15);
__ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
- __ Align(16);
- {
// Copy loop for unaligned source and aligned destination.
- // If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ mov(edx, ecx);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, UNALIGNED);
+ __ jmp(&move_last_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, last_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ mov_b(eax, Operand(src, 0));
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ dec(count);
+ __ bind(&forward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &last_15_much_overlap, FORWARD, UNALIGNED);
+ __ bind(&last_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
+ {
+ // |dst| is a higher address than |src|. Copy backwards.
+ Label unaligned_source, move_first_15, skip_last_move;
+ __ bind(&backward);
+ // |dst| and |src| always point to the end of what's left to copy.
+ __ add(dst, count);
+ __ add(src, count);
+ __ mov(eax, dst);
+ __ sub(eax, src);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &backward_much_overlap);
+ // Copy last 16 bytes.
+ __ movdqu(xmm0, Operand(src, -0x10));
+ __ movdqu(Operand(dst, -0x10), xmm0);
+ // Find distance to alignment: dst & 0xF
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ sub(dst, edx);
+ __ sub(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
+ // Copy loop for aligned source and destination.
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
+ __ bind(&move_first_15);
+ __ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
+ __ sub(src, count);
+ __ sub(dst, count);
__ movdqu(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0x0F);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
+ // Copy loop for unaligned source and aligned destination.
+ __ bind(&unaligned_source);
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, UNALIGNED);
+ __ jmp(&move_first_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, first_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ dec(src);
+ __ dec(dst);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ __ dec(count);
+ __ bind(&backward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &first_15_much_overlap, BACKWARD, UNALIGNED);
+ __ bind(&first_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ // Small/medium handlers expect dst/src to point to the beginning.
+ __ sub(dst, count);
+ __ sub(src, count);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
+ {
+ // Special handlers for 9 <= copy_size < 64. No assumptions about
+ // alignment or move distance, so all reads must be unaligned and
+ // must happen before any writes.
+ Label f9_16, f17_32, f33_48, f49_63;
+
+ __ bind(&f9_16);
+ __ movdbl(xmm0, Operand(src, 0));
+ __ movdbl(xmm1, Operand(src, count, times_1, -8));
+ __ movdbl(Operand(dst, 0), xmm0);
+ __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f17_32);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f33_48);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f49_63);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, 0x20));
+ __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, 0x20), xmm2);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
+ MemMoveEmitPopAndReturn(&masm);
+
+ medium_handlers = new size_t[4];
+ medium_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f9_16.pos();
+ medium_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f17_32.pos();
+ medium_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f33_48.pos();
+ medium_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f49_63.pos();
+
+ __ bind(&medium_size); // Entry point into this block.
+ __ mov(eax, count);
+ __ dec(eax);
+ __ shr(eax, 4);
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(eax, 3);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ __ mov(eax, Operand(eax, times_4,
+ reinterpret_cast<intptr_t>(medium_handlers)));
+ __ jmp(eax);
+ }
+ {
+ // Specialized copiers for copy_size <= 8 bytes.
+ Label f0, f1, f2, f3, f4, f5_8;
+ __ bind(&f0);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f1);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f2);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_w(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f3);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_b(edx, Operand(src, 2));
+ __ mov_w(Operand(dst, 0), eax);
+ __ mov_b(Operand(dst, 2), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f4);
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f5_8);
+ __ mov(eax, Operand(src, 0));
+ __ mov(edx, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, 0), eax);
+ __ mov(Operand(dst, count, times_1, -4), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ small_handlers = new size_t[9];
+ small_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f0.pos();
+ small_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f1.pos();
+ small_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f2.pos();
+ small_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f3.pos();
+ small_handlers[4] = reinterpret_cast<intptr_t>(buffer) + f4.pos();
+ small_handlers[5] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ small_handlers[6] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ small_handlers[7] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ small_handlers[8] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+
+ __ bind(&small_size); // Entry point into this block.
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(count, 8);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ __ mov(eax, Operand(count, times_4,
+ reinterpret_cast<intptr_t>(small_handlers)));
+ __ jmp(eax);
}
-
} else {
- // SSE2 not supported. Unlikely to happen in practice.
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- __ cld();
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- // Copy the first word.
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
-
- // Increment src,dstso that dst is aligned.
- __ mov(edx, dst);
- __ and_(edx, 0x03);
- __ neg(edx);
- __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
- // edi is now aligned, ecx holds number of remaning bytes to copy.
-
- __ mov(edx, count);
- count = edx;
- __ shr(ecx, 2); // Make word count instead of byte count.
- __ rep_movs();
-
- // At most 3 bytes left to copy. Copy 4 bytes at end of string.
- __ and_(count, 3);
- __ mov(eax, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, count, times_1, -4), eax);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
+ // No SSE2.
+ Label forward;
+ __ cmp(count, 0);
+ __ j(equal, &pop_and_return);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+ __ jmp(&forward);
+ {
+ // Simple forward copier.
+ Label forward_loop_1byte, forward_loop_4byte;
+ __ bind(&forward_loop_4byte);
+ __ mov(eax, Operand(src, 0));
+ __ sub(count, Immediate(4));
+ __ add(src, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ add(dst, Immediate(4));
+ __ bind(&forward); // Entry point.
+ __ cmp(count, 3);
+ __ j(above, &forward_loop_4byte);
+ __ bind(&forward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(count);
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ jmp(&forward_loop_1byte);
+ }
+ {
+ // Simple backward copier.
+ Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
+ __ bind(&backward);
+ __ add(src, count);
+ __ add(dst, count);
+ __ cmp(count, 3);
+ __ j(below_equal, &entry_shortcut);
+
+ __ bind(&backward_loop_4byte);
+ __ sub(src, Immediate(4));
+ __ sub(count, Immediate(4));
+ __ mov(eax, Operand(src, 0));
+ __ sub(dst, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ cmp(count, 3);
+ __ j(above, &backward_loop_4byte);
+ __ bind(&backward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ bind(&entry_shortcut);
+ __ dec(src);
+ __ dec(count);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(dst);
+ __ mov_b(Operand(dst, 0), eax);
+ __ jmp(&backward_loop_1byte);
+ }
}
+ __ bind(&pop_and_return);
+ MemMoveEmitPopAndReturn(&masm);
+
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
-
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
+ return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
}
+
#undef __
// -------------------------------------------------------------------------
Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
- memcpy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
+ OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
+ code->relocation_info()->GetDataStartAddress(),
+ reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+ OS::MemMove(
+ code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
reloc_info->set_length(new_reloc_size);
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
char* Isolate::ArchiveThread(char* to) {
- memcpy(to, reinterpret_cast<char*>(thread_local_top()),
- sizeof(ThreadLocalTop));
+ OS::MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
+ sizeof(ThreadLocalTop));
InitializeThreadLocal();
clear_pending_exception();
clear_pending_message();
char* Isolate::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(thread_local_top()), from,
- sizeof(ThreadLocalTop));
+ OS::MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
+ sizeof(ThreadLocalTop));
// This might be just paranoia, but it seems to be needed in case a
// thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
index);
if (result == RE_SUCCESS) {
// Copy capture results to the start of the registers array.
- memcpy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
+ OS::MemCopy(
+ output, raw_output, number_of_capture_registers * sizeof(int32_t));
}
if (result == RE_EXCEPTION) {
ASSERT(!isolate->has_pending_exception());
#define V8_LIST_INL_H_
#include "list.h"
+#include "platform.h"
namespace v8 {
namespace internal {
void List<T, P>::Resize(int new_capacity, P alloc) {
ASSERT_LE(length_, new_capacity);
T* new_data = NewData(new_capacity, alloc);
- memcpy(new_data, data_, length_ * sizeof(T));
+ OS::MemCopy(new_data, data_, length_ * sizeof(T));
List<T, P>::DeleteData(data_);
data_ = new_data;
capacity_ = new_capacity;
// Copy the data.
int curently_used_size =
static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- memmove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
+ OS::MemMove(new_buffer + new_buffer_size - curently_used_size,
+ reloc_info_writer_.pos(), curently_used_size);
reloc_info_writer_.Reposition(
new_buffer + new_buffer_size - curently_used_size,
if (buffer.length() == code->relocation_size()) {
// Simply patch relocation area of code.
- memcpy(code->relocation_start(), buffer.start(), buffer.length());
+ OS::MemCopy(code->relocation_start(), buffer.start(), buffer.length());
return code;
} else {
// Relocation info section now has different size. We cannot simply
StackFrame* pre_pre_frame = frames[top_frame_index - 2];
- memmove(padding_start + kPointerSize - shortage_bytes,
- padding_start + kPointerSize,
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
+ OS::MemMove(padding_start + kPointerSize - shortage_bytes,
+ padding_start + kPointerSize,
+ Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
pre_pre_frame->SetCallerFp(pre_top_frame->fp());
// Open the low-level log file.
size_t len = strlen(name);
ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
- memcpy(ll_name.start(), name, len);
- memcpy(ll_name.start() + len, kLowLevelLogExt, sizeof(kLowLevelLogExt));
+ OS::MemCopy(ll_name.start(), name, len);
+ OS::MemCopy(ll_name.start() + len,
+ kLowLevelLogExt, sizeof(kLowLevelLogExt));
ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
}
void AppendBytes(const char* bytes, int size) {
size = Min(size, kUtf8BufferSize - utf8_pos_);
- memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
+ OS::MemCopy(utf8_buffer_ + utf8_pos_, bytes, size);
utf8_pos_ += size;
}
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- memcpy(&i, &d, 8);
+ OS::MemCopy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
+ OS::MemCopy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ OS::MemCopy(buffer, ®isters_[reg], 2 * sizeof(registers_[0]));
+ OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
// Registers a0 and a1 -> x.
reg_buffer[0] = get_register(a0);
reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
+ OS::MemCopy(x, buffer, sizeof(buffer));
// Registers a2 and a3 -> y.
reg_buffer[0] = get_register(a2);
reg_buffer[1] = get_register(a3);
- memcpy(y, buffer, sizeof(buffer));
+ OS::MemCopy(y, buffer, sizeof(buffer));
// Register 2 -> z.
reg_buffer[0] = get_register(a2);
- memcpy(z, buffer, sizeof(*z));
+ OS::MemCopy(z, buffer, sizeof(*z));
}
}
} else {
char buffer[2 * sizeof(registers_[0])];
int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to v0 and v1.
set_register(v0, reg_buffer[0]);
set_register(v1, reg_buffer[1]);
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
+ OS::MemCopy(buffer, &d0, sizeof(d0));
set_dw_register(a0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
+ OS::MemCopy(buffer, &d1, sizeof(d1));
set_dw_register(a2, buffer);
}
CallInternal(entry);
if (previous_length != 0) {
uint8_t* previous_array =
previous->serialized_data()->GetDataStartAddress();
- memcpy(array, previous_array, previous_length);
+ OS::MemCopy(array, previous_array, previous_length);
array += previous_length;
}
ASSERT(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0);
#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
+// Fallback that simply forwards to the C library's memmove().
+// memmove_function (below) is initialized to point here so that
+// OS::MemMove is callable at any time during startup, before the
+// generated version has been installed.
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+  memmove(dest, src, size);
+}
+
+// Initialize to library version so we can call this at any time during startup.
+static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
+
// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
+OS::MemMoveFunction CreateMemMoveFunction();
-// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
+// Copy memory area. No restrictions.
+void OS::MemMove(void* dest, const void* src, size_t size) {
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
+ (*memmove_function)(dest, src, size);
}
+
#endif // V8_TARGET_ARCH_IA32
void POSIXPostSetUp() {
#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
+ OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ if (generated_memmove != NULL) {
+ memmove_function = generated_memmove;
+ }
#endif
init_fast_sin_function();
init_fast_cos_function();
static Mutex* limit_mutex = NULL;
#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
+// Fallback that simply forwards to the C library's memmove().
+// memmove_function (below) is initialized to point here so that
+// OS::MemMove is callable at any time during startup, before the
+// generated version has been installed.
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+  memmove(dest, src, size);
+}
+
+// Initialize to library version so we can call this at any time during startup.
+static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
+
// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
+OS::MemMoveFunction CreateMemMoveFunction();
-// Copy memory area to disjoint memory area.
+// Copy memory area. No restrictions.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
+void OS::MemMove(void* dest, const void* src, size_t size) {
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
+ (*memmove_function)(dest, src, size);
}
+
#endif // V8_TARGET_ARCH_IA32
#ifdef _WIN64
// CPU.
MathSetup();
#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
+ OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ if (generated_memmove != NULL) {
+ memmove_function = generated_memmove;
+ }
#endif
}
if (file_mapping == NULL) return NULL;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- if (memory) memmove(memory, initial, size);
+ if (memory) OS::MemMove(memory, initial, size);
return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
#if defined(V8_TARGET_ARCH_IA32)
- // Copy memory area to disjoint memory area.
- static void MemCopy(void* dest, const void* src, size_t size);
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
static const int kMinComplexMemCopy = 64;
- typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
+ // Copy memory area. No restrictions.
+ static void MemMove(void* dest, const void* src, size_t size);
+ typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
+
+ // Keep the distinction of "move" vs. "copy" for the benefit of other
+ // architectures.
+ static void MemCopy(void* dest, const void* src, size_t size) {
+ MemMove(dest, src, size);
+ }
#else // V8_TARGET_ARCH_IA32
+ // Copy memory area to disjoint memory area.
static void MemCopy(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
+ static void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+ }
static const int kMinComplexMemCopy = 16 * kPointerSize;
#endif // V8_TARGET_ARCH_IA32
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- memcpy(data.start(), preamble_, sizeof(preamble_));
+ OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
if (function_size > 0) {
function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
preamble_[PreparseDataConstants::kSymbolCountOffset] = symbol_id_;
- memcpy(data.start(), preamble_, sizeof(preamble_));
+ OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
if (function_size > 0) {
function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
// Hit the bottom of the allocated pushback buffer.
// Double the buffer and continue.
uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
- memcpy(new_buffer + pushback_buffer_backing_size_,
- pushback_buffer_backing_,
- pushback_buffer_backing_size_);
+ OS::MemCopy(new_buffer + pushback_buffer_backing_size_,
+ pushback_buffer_backing_,
+ pushback_buffer_backing_size_);
DeleteArray(pushback_buffer_backing_);
buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
const int slack = 32;
int new_size = size_ + (size_ >> 1) + slack;
char* new_output = NewArray<char>(new_size);
- memcpy(new_output, output_, pos_);
+ OS::MemCopy(new_output, output_, pos_);
DeleteArray(output_);
output_ = new_output;
size_ = new_size;
void RegExpMacroAssemblerIrregexp::Copy(Address a) {
- memcpy(a, buffer_.start(), length());
+ OS::MemCopy(a, buffer_.start(), length());
}
Vector<byte> old_buffer = buffer_;
buffer_ = Vector<byte>::New(old_buffer.length() * 2);
own_buffer_ = true;
- memcpy(buffer_.start(), old_buffer.start(), old_buffer.length());
+ OS::MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length());
if (old_buffer_was_our_own) {
old_buffer.Dispose();
}
char* RegExpStack::ArchiveStack(char* to) {
size_t size = sizeof(thread_local_);
- memcpy(reinterpret_cast<void*>(to),
- &thread_local_,
- size);
+ OS::MemCopy(reinterpret_cast<void*>(to), &thread_local_, size);
thread_local_ = ThreadLocal();
return to + size;
}
char* RegExpStack::RestoreStack(char* from) {
size_t size = sizeof(thread_local_);
- memcpy(&thread_local_, reinterpret_cast<void*>(from), size);
+ OS::MemCopy(&thread_local_, reinterpret_cast<void*>(from), size);
return from + size;
}
Address new_memory = NewArray<byte>(static_cast<int>(size));
if (thread_local_.memory_size_ > 0) {
// Copy original memory into top of new memory.
- memcpy(reinterpret_cast<void*>(
- new_memory + size - thread_local_.memory_size_),
- reinterpret_cast<void*>(thread_local_.memory_),
- thread_local_.memory_size_);
+ OS::MemCopy(
+ reinterpret_cast<void*>(
+ new_memory + size - thread_local_.memory_size_),
+ reinterpret_cast<void*>(thread_local_.memory_),
+ thread_local_.memory_size_);
DeleteArray(thread_local_.memory_);
}
thread_local_.memory_ = new_memory;
void ExpandBuffer() {
Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- memcpy(new_store.start(), backing_store_.start(), position_);
+ OS::MemCopy(new_store.start(), backing_store_.start(), position_);
backing_store_.Dispose();
backing_store_ = new_store;
}
void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- memcpy(to, data_ + position_, number_of_bytes);
+ OS::MemCopy(to, data_ + position_, number_of_bytes);
position_ += number_of_bytes;
}
SmartArrayPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
- memcpy(str, buffer_, length_);
+ OS::MemCopy(str, buffer_, length_);
str[length_] = '\0';
return SmartArrayPointer<const char>(str);
}
if (new_space == NULL) {
return space_;
}
- memcpy(new_space, space_, *bytes);
+ OS::MemCopy(new_space, space_, *bytes);
*bytes = new_bytes;
DeleteArray(space_);
space_ = new_space;
#include "unicode.h"
#include "checks.h"
+#include "platform.h"
namespace unibrow {
unsigned buffer_length =
last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
- memcpy(data, buffer_, memcpy_length*sizeof(uint16_t));
+ v8::internal::OS::MemCopy(data, buffer_, memcpy_length*sizeof(uint16_t));
if (length <= buffer_length) return length;
ASSERT(unbuffered_start_ != NULL);
// Copy the rest the slow way.
#include <stdarg.h>
#include "../include/v8stdint.h"
#include "checks.h"
+#include "platform.h"
#include "utils.h"
namespace v8 {
void SimpleStringBuilder::AddSubstring(const char* s, int n) {
ASSERT(!is_finalized() && position_ + n <= buffer_.length());
ASSERT(static_cast<size_t>(n) <= strlen(s));
- memcpy(&buffer_[position_], s, n * kCharSize);
+ OS::MemCopy(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
#include <string.h>
#include <climits>
-#include "globals.h"
-#include "checks.h"
#include "allocation.h"
+#include "checks.h"
+#include "globals.h"
namespace v8 {
namespace internal {
// When copying, make underlying Vector to reference our buffer.
EmbeddedVector(const EmbeddedVector& rhs)
: Vector<T>(rhs) {
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
set_start(buffer_);
}
EmbeddedVector& operator=(const EmbeddedVector& rhs) {
if (this == &rhs) return *this;
Vector<T>::operator=(rhs);
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
this->set_start(buffer_);
return *this;
INLINE(static Dest cast(const Source& source)) {
Dest dest;
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(&dest, &source, sizeof(dest));
return dest;
}
char* new_result = NewArray<char>(new_len);
// Copy the existing input into the new array and set the new
// array as the result.
- memcpy(new_result, result, offset * kCharSize);
+ OS::MemCopy(new_result, result, offset * kCharSize);
DeleteArray(result);
result = new_result;
}
// Copy the newly read line into the result.
- memcpy(result + offset, line_buf, len * kCharSize);
+ OS::MemCopy(result + offset, line_buf, len * kCharSize);
offset += len;
}
ASSERT(result != NULL);
// ----------------------------------------------------------------------------
// Memory
-// Copies data from |src| to |dst|. The data spans must not overlap.
+// Copies words from |src| to |dst|. The data spans must not overlap.
template <typename T>
-inline void CopyWords(T* dst, T* src, int num_words) {
+inline void CopyWords(T* dst, const T* src, size_t num_words) {
STATIC_ASSERT(sizeof(T) == kPointerSize);
- ASSERT(Min(dst, src) + num_words <= Max(dst, src));
+ ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
+ Max(dst, const_cast<T*>(src)));
ASSERT(num_words > 0);
// Use block copying OS::MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = 16;
- STATIC_ASSERT(kBlockCopyLimit * kPointerSize >= OS::kMinComplexMemCopy);
+ static const size_t kBlockCopyLimit = 16;
- if (num_words >= kBlockCopyLimit) {
- OS::MemCopy(dst, src, num_words * kPointerSize);
+ if (num_words < kBlockCopyLimit) {
+ do {
+ num_words--;
+ *dst++ = *src++;
+ } while (num_words > 0);
} else {
- int remaining = num_words;
+ OS::MemCopy(dst, src, num_words * kPointerSize);
+ }
+}
+
+
+// Copies words from |src| to |dst|. No restrictions.
+template <typename T>
+inline void MoveWords(T* dst, const T* src, size_t num_words) {
+ STATIC_ASSERT(sizeof(T) == kPointerSize);
+ ASSERT(num_words > 0);
+
+  // Use block copying OS::MemMove if the segment we're copying is large
+  // enough to justify the extra call/setup overhead.
+ static const size_t kBlockCopyLimit = 16;
+
+ if (num_words < kBlockCopyLimit &&
+ ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
+ T* end = dst + num_words;
do {
- remaining--;
+ num_words--;
*dst++ = *src++;
- } while (remaining > 0);
+ } while (num_words > 0);
+ } else {
+ OS::MemMove(dst, src, num_words * kPointerSize);
}
}
// Copies data from |src| to |dst|. The data spans must not overlap.
template <typename T>
-inline void CopyBytes(T* dst, T* src, size_t num_bytes) {
+inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
STATIC_ASSERT(sizeof(T) == 1);
- ASSERT(Min(dst, src) + num_bytes <= Max(dst, src));
+ ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
+ Max(dst, const_cast<T*>(src)));
if (num_bytes == 0) return;
// Use block copying OS::MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
static const int kBlockCopyLimit = OS::kMinComplexMemCopy;
- if (num_bytes >= static_cast<size_t>(kBlockCopyLimit)) {
- OS::MemCopy(dst, src, num_bytes);
- } else {
- size_t remaining = num_bytes;
+ if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
do {
- remaining--;
+ num_bytes--;
*dst++ = *src++;
- } while (remaining > 0);
+ } while (num_bytes > 0);
+ } else {
+ OS::MemCopy(dst, src, num_bytes);
+ }
+}
+
+
+// Copies data from |src| to |dst|. No restrictions.
+template <typename T>
+inline void MoveBytes(T* dst, const T* src, size_t num_bytes) {
+ STATIC_ASSERT(sizeof(T) == 1);
+ switch (num_bytes) {
+ case 0: return;
+ case 1:
+ *dst = *src;
+ return;
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+ case 2:
+ *reinterpret_cast<uint16_t*>(dst) = *reinterpret_cast<const uint16_t*>(src);
+ return;
+ case 3: {
+ uint16_t part1 = *reinterpret_cast<const uint16_t*>(src);
+ byte part2 = *(src + 2);
+ *reinterpret_cast<uint16_t*>(dst) = part1;
+ *(dst + 2) = part2;
+ return;
+ }
+ case 4:
+ *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src);
+ return;
+ case 5:
+ case 6:
+ case 7:
+ case 8: {
+ uint32_t part1 = *reinterpret_cast<const uint32_t*>(src);
+ uint32_t part2 = *reinterpret_cast<const uint32_t*>(src + num_bytes - 4);
+ *reinterpret_cast<uint32_t*>(dst) = part1;
+ *reinterpret_cast<uint32_t*>(dst + num_bytes - 4) = part2;
+ return;
+ }
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16: {
+ double part1 = *reinterpret_cast<const double*>(src);
+ double part2 = *reinterpret_cast<const double*>(src + num_bytes - 8);
+ *reinterpret_cast<double*>(dst) = part1;
+ *reinterpret_cast<double*>(dst + num_bytes - 8) = part2;
+ return;
+ }
+#endif
+ default:
+ OS::MemMove(dst, src, num_bytes);
+ return;
}
}
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
(buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
if (isolate() != NULL &&
// issues when the stack allocated buffer goes out of scope.
int length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
// Serialize.
int serialized_data_length = sd->Length();
char* serialized_data = i::NewArray<char>(serialized_data_length);
- memcpy(serialized_data, sd->Data(), serialized_data_length);
+ i::OS::MemCopy(serialized_data, sd->Data(), serialized_data_length);
// Deserialize.
v8::ScriptData* deserialized_sd =
static double DoubleFromBits(uint64_t value) {
double target;
- memcpy(&target, &value, sizeof(target));
+ i::OS::MemCopy(&target, &value, sizeof(target));
return target;
}
static uint64_t DoubleToBits(double value) {
uint64_t target;
- memcpy(&target, &value, sizeof(target));
+ i::OS::MemCopy(&target, &value, sizeof(target));
return target;
}
for (int i = 0; i < max_rows; ++i) {
if (i > 0)
buffer[i - 1] = '\n';
- memcpy(&buffer[i], function_f, sizeof(function_f) - 1);
+ OS::MemCopy(&buffer[i], function_f, sizeof(function_f) - 1);
v8::Handle<v8::String> script_body = v8::String::New(buffer.start());
v8::Script::Compile(script_body, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
if (abort_countdown_ == 0) return kAbort;
CHECK_GT(chars_written, 0);
i::Vector<char> chunk = buffer_.AddBlock(chars_written, '\0');
- memcpy(chunk.start(), buffer, chars_written);
+ i::OS::MemCopy(chunk.start(), buffer, chars_written);
return kContinue;
}
virtual WriteResult WriteUint32Chunk(uint32_t* buffer, int chars_written) {
// Adding characters will make keyword matching fail.
static const char chars_to_append[] = { 'z', '0', '_' };
for (int j = 0; j < static_cast<int>(ARRAY_SIZE(chars_to_append)); ++j) {
- memmove(buffer, keyword, length);
+ i::OS::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
i::Utf8ToUtf16CharacterStream stream(buffer, length + 1);
i::Scanner scanner(&unicode_cache);
}
// Replacing characters will make keyword matching fail.
{
- memmove(buffer, keyword, length);
+ i::OS::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
i::Utf8ToUtf16CharacterStream stream(buffer, length);
i::Scanner scanner(&unicode_cache);
}
-void TestMemCopy(Vector<byte> src,
- Vector<byte> dst,
- int source_alignment,
- int destination_alignment,
- int length_alignment) {
- memset(dst.start(), 0xFF, dst.length());
- byte* to = dst.start() + 32 + destination_alignment;
- byte* from = src.start() + source_alignment;
- int length = OS::kMinComplexMemCopy + length_alignment;
- OS::MemCopy(to, from, static_cast<size_t>(length));
- printf("[%d,%d,%d]\n",
- source_alignment, destination_alignment, length_alignment);
- for (int i = 0; i < length; i++) {
- CHECK_EQ(from[i], to[i]);
+static const int kAreaSize = 512;
+
+
+void TestMemMove(byte* area1,
+ byte* area2,
+ byte* area3,
+ int src_offset,
+ int dest_offset,
+ int length) {
+ for (int i = 0; i < kAreaSize; i++) {
+ area1[i] = i & 0xFF;
+ area2[i] = i & 0xFF;
+ area3[i] = i & 0xFF;
+ }
+ OS::MemMove(area1 + dest_offset, area1 + src_offset, length);
+ MoveBytes(area2 + dest_offset, area2 + src_offset, length);
+ memmove(area3 + dest_offset, area3 + src_offset, length);
+ if (memcmp(area1, area3, kAreaSize) != 0) {
+ printf("OS::MemMove(): src_offset: %d, dest_offset: %d, length: %d\n",
+ src_offset, dest_offset, length);
+ for (int i = 0; i < kAreaSize; i++) {
+ if (area1[i] == area3[i]) continue;
+ printf("diff at offset %d (%p): is %d, should be %d\n",
+ i, reinterpret_cast<void*>(area1 + i), area1[i], area3[i]);
+ }
+ CHECK(false);
+ }
+ if (memcmp(area2, area3, kAreaSize) != 0) {
+ printf("MoveBytes(): src_offset: %d, dest_offset: %d, length: %d\n",
+ src_offset, dest_offset, length);
+ for (int i = 0; i < kAreaSize; i++) {
+ if (area2[i] == area3[i]) continue;
+ printf("diff at offset %d (%p): is %d, should be %d\n",
+ i, reinterpret_cast<void*>(area2 + i), area2[i], area3[i]);
+ }
+ CHECK(false);
}
- CHECK_EQ(0xFF, to[-1]);
- CHECK_EQ(0xFF, to[length]);
}
-
-TEST(MemCopy) {
+TEST(MemMove) {
v8::V8::Initialize();
OS::SetUp();
- const int N = OS::kMinComplexMemCopy + 128;
- Vector<byte> buffer1 = Vector<byte>::New(N);
- Vector<byte> buffer2 = Vector<byte>::New(N);
-
- for (int i = 0; i < N; i++) {
- buffer1[i] = static_cast<byte>(i & 0x7F);
- }
-
- // Same alignment.
- for (int i = 0; i < 32; i++) {
- TestMemCopy(buffer1, buffer2, i, i, i * 2);
- }
-
- // Different alignment.
- for (int i = 0; i < 32; i++) {
- for (int j = 1; j < 32; j++) {
- TestMemCopy(buffer1, buffer2, i, (i + j) & 0x1F , 0);
+ byte* area1 = new byte[kAreaSize];
+ byte* area2 = new byte[kAreaSize];
+ byte* area3 = new byte[kAreaSize];
+
+ static const int kMinOffset = 32;
+ static const int kMaxOffset = 64;
+ static const int kMaxLength = 128;
+ STATIC_ASSERT(kMaxOffset + kMaxLength < kAreaSize);
+
+ for (int src_offset = kMinOffset; src_offset <= kMaxOffset; src_offset++) {
+ for (int dst_offset = kMinOffset; dst_offset <= kMaxOffset; dst_offset++) {
+ for (int length = 0; length <= kMaxLength; length++) {
+ TestMemMove(area1, area2, area3, src_offset, dst_offset, length);
+ }
}
}
-
- // Different lengths
- for (int i = 0; i < 32; i++) {
- TestMemCopy(buffer1, buffer2, 3, 7, i);
- }
-
- buffer2.Dispose();
- buffer1.Dispose();
+ delete[] area1;
+ delete[] area2;
+ delete[] area3;
}