1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
12 #include "allocation.h"
// ----------------------------------------------------------------------------
// General helper functions

// Expression form of the power-of-two test, usable where a function call
// is not (e.g. in constant expressions). x & (x - 1) clears the lowest
// set bit, so the result is zero iff at most one bit is set.
#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))

// Returns true iff x is a power of 2. Cannot be used with the maximally
// negative value of the type T (the -1 overflows).
template <typename T>
inline bool IsPowerOf2(T x) {
  // A power of two has exactly one bit set; x & (x - 1) clears the lowest
  // set bit, so the result is zero iff at most one bit was set. The x != 0
  // guard excludes zero, which has no bits set.
  return x != 0 && ((x & (x - 1)) == 0);
}
// X must be a power of 2. Returns the number of trailing zeros.
inline int WhichPowerOf2(uint32_t x) {
  ASSERT(IsPowerOf2(x));
  // The (partially elided) body halves the search range repeatedly,
  // accumulating one result bit per step; the final switch resolves the
  // last 4-bit nibble with deliberate fall-through.
  default: UNREACHABLE();
  case 8: bits++;  // Fall through.
  case 4: bits++;  // Fall through.
  case 2: bits++;  // Fall through.
  // Sanity check: 2^bits must reproduce the original argument.
  ASSERT_EQ(1 << bits, original_x);
// Table-driven most-significant-bit computation. msb4 maps each 4-bit
// value to its bit width (0 for 0, 1 for 1, 2 for 2-3, 3 for 4-7, ...).
inline int MostSignificantBit(uint32_t x) {
  static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
  // nibble accumulates the shift distance applied by the reduction steps
  // above (not shown here); the table supplies the final 0-15 residue.
  return nibble + msb4[x];
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// Portable arithmetic (sign-extending) right shift of x by s bits.
inline int ArithmeticShiftRight(int x, int s) {
93 // Compute the 0-relative offset of some absolute value x of type T.
94 // This allows conversion of Addresses and integral types into
95 // 0-relative int offsets.
template <typename T>
inline intptr_t OffsetFrom(T x) {
  // Subtracting T's zero value yields the 0-relative offset: a ptrdiff
  // for pointer-like types, the value itself for integral types.
  T zero = static_cast<T>(0);
  return x - zero;
}
102 // Compute the absolute value of type T for some 0-relative offset x.
103 // This allows conversion of 0-relative int offsets into Addresses and
template <typename T>
inline T AddressFrom(intptr_t x) {
  // Symmetric inverse of OffsetFrom: add the 0-relative offset back onto
  // T's zero value (pointer arithmetic for pointer-like T).
  T base = static_cast<T>(0);
  return static_cast<T>(base + x);
}
111 // Return the largest multiple of m which is <= x.
112 template <typename T>
113 inline T RoundDown(T x, intptr_t m) {
114 ASSERT(IsPowerOf2(m));
115 return AddressFrom<T>(OffsetFrom(x) & -m);
119 // Return the smallest multiple of m which is >= x.
120 template <typename T>
121 inline T RoundUp(T x, intptr_t m) {
122 return RoundDown<T>(static_cast<T>(x + m - 1), m);
126 // Increment a pointer until it has the specified alignment.
127 // This works like RoundUp, but it works correctly on pointer types where
128 // sizeof(*pointer) might not be 1.
130 T AlignUp(T pointer, size_t alignment) {
131 ASSERT(sizeof(pointer) == sizeof(uintptr_t));
132 uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
133 return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
// Generic three-way comparison of a and b; returns an int in the strcmp
// style (negative / zero / positive) — body elided here, contract assumed
// from the callers below.
template <typename T>
int Compare(const T& a, const T& b) {
148 template <typename T>
149 int PointerValueCompare(const T* a, const T* b) {
150 return Compare<T>(*a, *b);
// Compare function to compare the object pointer value of two
// handlified objects. The handles are passed as pointers to the
template<typename T> class Handle; // Forward declaration.
template <typename T>
int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
  // *a is a Handle<T>; *(*a) presumably yields the raw T* it holds
  // (Handle's dereference operator) — compared by pointer value.
  return Compare<T*>(*(*a), *(*b));
// Returns the smallest power of two which is >= x. If you pass in a
// number that is already a power of two, it is returned as is.
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
inline uint32_t RoundUpToPowerOf2(uint32_t x) {
  // 2^31 is the largest power of two representable in a uint32_t.
  ASSERT(x <= 0x80000000u);
// Returns the largest power of two which is <= x.
inline uint32_t RoundDownToPowerOf2(uint32_t x) {
  // Round up first; if that overshot x, the next power of two down is
  // the answer.
  uint32_t rounded_up = RoundUpToPowerOf2(x);
  if (rounded_up > x) return rounded_up >> 1;
// Returns true if value is a multiple of alignment. alignment is assumed
// (not checked) to be a power of two.
template <typename T, typename U>
inline bool IsAligned(T value, U alignment) {
  // alignment - 1 masks exactly the bits that must be clear for value to
  // be aligned.
  return 0 == (value & (alignment - 1));
}
// Returns true if (addr + offset) is aligned.
inline bool IsAddressAligned(Address addr,
  // Convert the biased address to a 0-relative integer offset, then test
  // alignment on that.
                             intptr_t offs = OffsetFrom(addr + offset);
  return IsAligned(offs, alignment);
// Returns the maximum of the two parameters.
template <typename T>
T Max(T a, T b) {
  if (a < b) return b;
  return a;
}
// Returns the minimum of the two parameters.
template <typename T>
T Min(T a, T b) {
  if (a < b) return a;
  return b;
}
// Returns the absolute value of its argument.
template <typename T>
T Abs(T a) {
  if (a < 0) return -a;
  return a;
}
// Returns the negative absolute value of its argument.
template <typename T>
T NegAbs(T a) {
  // Negating only the non-negative case avoids overflow on the minimum
  // value of signed types.
  if (a < 0) return a;
  return -a;
}
230 // TODO(svenpanne) Clean up the whole power-of-2 mess.
231 inline int32_t WhichPowerOf2Abs(int32_t x) {
232 return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x));
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
template<class T, int shift, int size, class U>
  // A type U mask of bit field. To use all bits of a type U of x bits
  // in a bitfield without compiler warnings we have to compute 2^x
  // without using a shift count of x in the computation.
  static const U kOne = static_cast<U>(1U);
  // Mask with ones in exactly bits [shift, shift + size).
  static const U kMask = ((kOne << shift) << size) - (kOne << shift);
  static const U kShift = shift;
  static const U kSize = size;

  // Value for the field with all bits set.
  static const T kMax = static_cast<T>((1U << size) - 1);

  // Tells whether the provided value fits into the bit field.
  static bool is_valid(T value) {
    return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;

  // Returns a type U with the bit field value encoded.
  static U encode(T value) {
    ASSERT(is_valid(value));
    return static_cast<U>(value) << shift;

  // Returns a type U with the bit field value updated.
  static U update(U previous, T value) {
    // Clear the old field bits, then OR in the new encoding.
    return (previous & ~kMask) | encode(value);

  // Extracts the bit field from the value.
  static T decode(U value) {
    return static_cast<T>((value & kMask) >> shift);
// BitField packed into a 32-bit backing store.
template<class T, int shift, int size>
class BitField : public BitFieldBase<T, shift, size, uint32_t> { };

// BitField packed into a 64-bit backing store.
template<class T, int shift, int size>
class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
// ----------------------------------------------------------------------------

// Default (no-op) seed for the integer hash functions below.
static const uint32_t kZeroHashSeed = 0;

// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  // Bit-mixing cascade: alternating shifted adds and xor-shifts spread
  // the key's entropy across all 32 bits.
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
// 64-bit variant of the Thomas Wang integer hash; folds the result down
// to 32 bits.
inline uint32_t ComputeLongHash(uint64_t key) {
  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
  hash = hash ^ (hash >> 31);
  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
  hash = hash ^ (hash >> 11);
  hash = hash + (hash << 6);
  hash = hash ^ (hash >> 22);
  // Truncate the mixed 64-bit state to the 32-bit hash result.
  return static_cast<uint32_t>(hash);
317 inline uint32_t ComputePointerHash(void* ptr) {
318 return ComputeIntegerHash(
319 static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
320 v8::internal::kZeroHashSeed);
// ----------------------------------------------------------------------------

// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access. Attempts to re-reserve
// the instance will cause an error.
template <typename T>
class StaticResource {
  // Starts out unreserved; Access flips the flag while it holds the
  // resource.
  StaticResource() : is_reserved_(false) {}

  // Access needs to read/write instance_ and is_reserved_ directly.
  template <typename S> friend class Access;
// Locally scoped access to a static resource.
template <typename T>
  // Reserves the resource for the lifetime of this Access object;
  // double-reservation is a programming error.
  explicit Access(StaticResource<T>* resource)
    : resource_(resource)
    , instance_(&resource->instance_) {
    ASSERT(!resource->is_reserved_);
    resource->is_reserved_ = true;

    // Release the reservation on destruction (RAII-style scope guard).
    resource_->is_reserved_ = false;

  // Accessors for the reserved instance.
  T* value()  { return instance_; }
  T* operator -> ()  { return instance_; }

  StaticResource<T>* resource_;
// A pointer that can only be set once and doesn't allow NULL values.
class SetOncePointer {
  // Starts unset; is_set() distinguishes the two states.
  SetOncePointer() : pointer_(NULL) { }

  bool is_set() const { return pointer_ != NULL; }

    // Reading before the pointer has been set is an error.
    ASSERT(pointer_ != NULL);

    // Setting twice, or setting to NULL, is an error.
    ASSERT(pointer_ == NULL && value != NULL);
// A Vector whose kSize-element backing store is embedded in the object
// itself (no heap allocation).
template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
  EmbeddedVector() : Vector<T>(buffer_, kSize) { }

  // Fill-constructor: every slot gets initial_value.
  explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
    for (int i = 0; i < kSize; ++i) {
      buffer_[i] = initial_value;

  // When copying, make underlying Vector to reference our buffer.
  EmbeddedVector(const EmbeddedVector& rhs)
    OS::MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);

  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
    if (this == &rhs) return *this;
    Vector<T>::operator=(rhs);
    // Deep-copy the payload, then repoint the base Vector at our own
    // buffer (the base assignment aliased rhs's buffer).
    OS::MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
423 * A class that collects values into a backing store.
424 * Specialized versions of the class can allow access to the backing store
426 * There is no guarantee that the backing store is contiguous (and, as a
427 * consequence, no guarantees that consecutively added elements are adjacent
428 * in memory). The collector may move elements unless it has guaranteed not
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
  // Allocates the first chunk; index_/size_ track the write position and
  // total element count respectively.
  explicit Collector(int initial_capacity = kMinCapacity)
      : index_(0), size_(0) {
    current_chunk_ = Vector<T>::New(initial_capacity);

  virtual ~Collector() {
    // Free backing store (in reverse allocation order).
    current_chunk_.Dispose();
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();

  // Add a single element.
  inline void Add(T value) {
    // Grow (elided) when the current chunk is full.
    if (index_ >= current_chunk_.length()) {
    current_chunk_[index_] = value;
  // Add a block of contiguous elements and return a Vector backed by the
  // A basic Collector will keep this vector valid as long as the Collector
  inline Vector<T> AddBlock(int size, T initial_value) {
    // Ensure the whole block fits in one chunk so it stays contiguous.
    if (size > current_chunk_.length() - index_) {
    T* position = current_chunk_.start() + index_;
    for (int i = 0; i < size; i++) {
      position[i] = initial_value;
    // The returned vector aliases the chunk's storage; see validity note
    // above.
    return Vector<T>(position, size);

  // Add a contiguous block of elements and return a vector backed
  // by the added block.
  // A basic Collector will keep this vector valid as long as the Collector
  inline Vector<T> AddBlock(Vector<const T> source) {
    // Same contiguity guarantee as AddBlock(int, T) above.
    if (source.length() > current_chunk_.length() - index_) {
      Grow(source.length());
    T* position = current_chunk_.start() + index_;
    index_ += source.length();
    size_ += source.length();
    for (int i = 0; i < source.length(); i++) {
      position[i] = source[i];
    return Vector<T>(position, source.length());
  // Write the contents of the collector into the provided vector.
  void WriteTo(Vector<T> destination) {
    ASSERT(size_ <= destination.length());
    // First copy the full (retired) chunks in order...
    for (int i = 0; i < chunks_.length(); i++) {
      Vector<T> chunk = chunks_.at(i);
      for (int j = 0; j < chunk.length(); j++) {
        destination[position] = chunk[j];
    // ...then the partially filled current chunk.
    for (int i = 0; i < index_; i++) {
      destination[position] = current_chunk_[i];

  // Allocate a single contiguous vector, copy all the collected
  // elements to the vector, and return it.
  // The caller is responsible for freeing the memory of the returned
  // vector (e.g., using Vector::Dispose).
  Vector<T> ToVector() {
    Vector<T> new_store = Vector<T>::New(size_);

  // Resets the collector to be empty.
  virtual void Reset();

  // Total number of elements added to collector so far.
  inline int size() { return size_; }

  static const int kMinCapacity = 16;
  List<Vector<T> > chunks_;       // Retired (full) chunks, oldest first.
  Vector<T> current_chunk_;  // Block of memory currently being written into.
  int index_;  // Current index in current chunk.
  int size_;  // Total number of elements in collector.
  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
  void Grow(int min_capacity) {
    ASSERT(growth_factor > 1);
    int current_length = current_chunk_.length();
    if (current_length < kMinCapacity) {
      // The collector started out as empty.
      new_capacity = min_capacity * growth_factor;
      if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
      // Geometric growth, capped at max_growth per step.
      int growth = current_length * (growth_factor - 1);
      if (growth > max_growth) {
      new_capacity = current_length + growth;
      // Even a capped step must be able to hold the requested block.
      if (new_capacity < min_capacity) {
        new_capacity = min_capacity + growth;
    NewChunk(new_capacity);
    ASSERT(index_ + min_capacity <= current_chunk_.length());

  // Before replacing the current chunk, give a subclass the option to move
  // some of the current data into the new chunk. The function may update
  // the current index_ value to represent data no longer in the current chunk.
  // Returns the initial index of the new chunk (after copied data).
  virtual void NewChunk(int new_capacity) {
    Vector<T> new_chunk = Vector<T>::New(new_capacity);
    // Retire the filled prefix of the old chunk; dispose the remainder.
    chunks_.Add(current_chunk_.SubVector(0, index_));
    current_chunk_.Dispose();
    current_chunk_ = new_chunk;
575 * A collector that allows sequences of values to be guaranteed to
577 * If the backing store grows while a sequence is active, the current
578 * sequence might be moved, but after the sequence is ended, it will
580 * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
581 * as well, if inside an active sequence where another element is added.
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
  // No sequence is active initially (sequence_start_ == kNoSequence).
  explicit SequenceCollector(int initial_capacity)
      : Collector<T, growth_factor, max_growth>(initial_capacity),
        sequence_start_(kNoSequence) { }

  virtual ~SequenceCollector() {}

  // Marks the current write position as the start of a sequence; nesting
  // sequences is an error.
  void StartSequence() {
    ASSERT(sequence_start_ == kNoSequence);
    sequence_start_ = this->index_;
597 Vector<T> EndSequence() {
598 ASSERT(sequence_start_ != kNoSequence);
599 int sequence_start = sequence_start_;
600 sequence_start_ = kNoSequence;
601 if (sequence_start == this->index_) return Vector<T>();
602 return this->current_chunk_.SubVector(sequence_start, this->index_);
605 // Drops the currently added sequence, and all collected elements in it.
606 void DropSequence() {
607 ASSERT(sequence_start_ != kNoSequence);
608 int sequence_length = this->index_ - sequence_start_;
609 this->index_ = sequence_start_;
610 this->size_ -= sequence_length;
611 sequence_start_ = kNoSequence;
  // Clears any active sequence, then resets the underlying collector.
  virtual void Reset() {
    sequence_start_ = kNoSequence;
    this->Collector<T, growth_factor, max_growth>::Reset();

  // Sentinel for "no sequence currently active".
  static const int kNoSequence = -1;
  // Move the currently active sequence to the new chunk.
  virtual void NewChunk(int new_capacity) {
    if (sequence_start_ == kNoSequence) {
      // Fall back on default behavior if no sequence has been started.
      this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
    // Size the new chunk to hold the in-flight sequence plus the
    // requested capacity, then relocate the sequence to its front.
    int sequence_length = this->index_ - sequence_start_;
    Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
    ASSERT(sequence_length < new_chunk.length());
    for (int i = 0; i < sequence_length; i++) {
      new_chunk[i] = this->current_chunk_[sequence_start_ + i];
    // Retire any elements that precede the sequence in the old chunk.
    if (sequence_start_ > 0) {
      this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
    this->current_chunk_.Dispose();
    this->current_chunk_ = new_chunk;
    this->index_ = sequence_length;
// Compare ASCII/16bit chars to ASCII/16bit chars.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs,
  const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
  // Fast path: when both sides have the same element width, compare a
  // whole machine word at a time until fewer than kStepSize chars remain.
  if (sizeof(*lhs) == sizeof(*rhs)) {
    // Number of characters in a uintptr_t.
    static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs);  // NOLINT
    while (lhs <= limit - kStepSize) {
      if (*reinterpret_cast<const uintptr_t*>(lhs) !=
          *reinterpret_cast<const uintptr_t*>(rhs)) {
  // Scalar tail (and the general mixed-width path): strcmp-style result.
  while (lhs < limit) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
// Dispatches a character comparison to the unsigned variant with the
// concrete 8/16-bit element types, so sign extension never skews results.
template<typename lchar, typename rchar>
inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
  ASSERT(sizeof(lchar) <= 2);
  ASSERT(sizeof(rchar) <= 2);
  if (sizeof(lchar) == 1) {
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
// Calculate 10^exponent.
inline int TenToThe(int exponent) {
  // Limited to 1..9 so the result always fits in an int.
  ASSERT(exponent <= 9);
  ASSERT(exponent >= 1);
  for (int i = 1; i < exponent; i++) answer *= 10;
715 // The type-based aliasing rule allows the compiler to assume that pointers of
716 // different types (for some definition of different) never alias each other.
717 // Thus the following code does not work:
720 // int fbits = *(int*)(&f);
722 // The compiler 'knows' that the int pointer can't refer to f since the types
723 // don't match, so the compiler may cache f in a register, leaving random data
724 // in fbits. Using C++ style casts makes no difference, however a pointer to
725 // char data is assumed to alias any other pointer. This is the 'memcpy
728 // Bit_cast uses the memcpy exception to move the bits from a variable of one
729 // type of a variable of another type. Of course the end result is likely to
730 // be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
731 // will completely optimize BitCast away.
733 // There is an additional use for BitCast.
734 // Recent gccs will warn when they see casts that may result in breakage due to
735 // the type-based aliasing rule. If you have checked that there is no breakage
736 // you can use BitCast to cast one pointer type to another. This confuses gcc
737 // enough that it can no longer see that you have cast one pointer type to
738 // another thus avoiding the warning.
740 // We need different implementations of BitCast for pointer and non-pointer
741 // values. We use partial specialization of auxiliary struct to work around
742 // issues with template functions overloading.
// General case: copy the object representation via memcpy, the one
// aliasing-safe way to reinterpret bits (see the comment block above).
template <class Dest, class Source>
struct BitCastHelper {
  // Source and Dest must have identical sizes for a bit-for-bit copy.
  STATIC_ASSERT(sizeof(Dest) == sizeof(Source));

  INLINE(static Dest cast(const Source& source)) {
    memcpy(&dest, &source, sizeof(dest));
// Partial specialization for pointer sources: convert the pointer to a
// uintptr_t first, then bit-cast that integer.
template <class Dest, class Source>
struct BitCastHelper<Dest, Source*> {
  INLINE(static Dest cast(Source* source)) {
    return BitCastHelper<Dest, uintptr_t>::
        cast(reinterpret_cast<uintptr_t>(source));
// Public entry point; forwards to the pointer/non-pointer specialization
// of BitCastHelper.
template <class Dest, class Source>
INLINE(Dest BitCast(const Source& source));

template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  return BitCastHelper<Dest, Source>::cast(source);
// Fixed-size array wrapper with bounds-checked (in debug mode) indexing;
// storage is embedded, no heap allocation.
template<typename ElementType, int NumElements>
class EmbeddedContainer {
  // Value-initializes all elements.
  EmbeddedContainer() : elems_() { }

  int length() const { return NumElements; }
  const ElementType& operator[](int i) const {
    ASSERT(i < length());
  ElementType& operator[](int i) {
    ASSERT(i < length());

  ElementType elems_[NumElements];
// Specialization for zero elements: no storage, and any indexing is a
// bug (returns a dummy element to keep the signature).
template<typename ElementType>
class EmbeddedContainer<ElementType, 0> {
  int length() const { return 0; }
  const ElementType& operator[](int i) const {
    static ElementType t = 0;
  ElementType& operator[](int i) {
    static ElementType t = 0;
// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that checks the
// buffer bounds on all operations in debug mode.
// This simple base class does not allow formatted output.
class SimpleStringBuilder {
  // Create a string builder with a buffer of the given size. The
  // buffer is allocated through NewArray<char> and must be
  // deallocated by the caller of Finalize().
  explicit SimpleStringBuilder(int size);

  // Adopts an external buffer; the builder does not own it.
  SimpleStringBuilder(char* buffer, int size)
      : buffer_(buffer, size), position_(0) { }

  // Ensure the string is 0-terminated even if the caller forgot.
  ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }

  int size() const { return buffer_.length(); }

  // Get the current position in the builder.
  int position() const {
    ASSERT(!is_finalized());

  // Reset the position.
  void Reset() { position_ = 0; }

  // Add a single character to the builder. It is not allowed to add
  // 0-characters; use the Finalize() method to terminate the string
  void AddCharacter(char c) {
    ASSERT(!is_finalized() && position_ < buffer_.length());
    buffer_[position_++] = c;

  // Add an entire string to the builder. Uses strlen() internally to
  // compute the length of the input string.
  void AddString(const char* s);

  // Add the first 'n' characters of the given string 's' to the
  // builder. The input string must have enough characters.
  void AddSubstring(const char* s, int n);

  // Add character padding to the builder. If count is non-positive,
  // nothing is added to the builder.
  void AddPadding(char c, int count);

  // Add the decimal representation of the value.
  void AddDecimalInteger(int value);

  // Finalize the string by 0-terminating it and returning the buffer.

  Vector<char> buffer_;
  // A negative position_ marks the builder as finalized.
  bool is_finalized() const { return position_ < 0; }

  DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
// A poor man's version of STL's bitset: A bit set of enums E (without explicit
// values), fitting into an integral type T.
template <class E, class T = int>
  // Starts empty unless an initial bit pattern is supplied.
  explicit EnumSet(T bits = 0) : bits_(bits) {}
  bool IsEmpty() const { return bits_ == 0; }
  bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
  bool ContainsAnyOf(const EnumSet& set) const {
    return (bits_ & set.bits_) != 0;
  void Add(E element) { bits_ |= Mask(element); }
  void Add(const EnumSet& set) { bits_ |= set.bits_; }
  void Remove(E element) { bits_ &= ~Mask(element); }
  void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
  void RemoveAll() { bits_ = 0; }
  void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
  T ToIntegral() const { return bits_; }
  bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
  bool operator!=(const EnumSet& set) { return bits_ != set.bits_; }
  EnumSet<E, T> operator|(const EnumSet& set) const {
    return EnumSet<E, T>(bits_ | set.bits_);

  // Single-bit mask for the given enum value (element must fit in T).
  T Mask(E element) const {
    // The strange typing in ASSERT is necessary to avoid stupid warnings, see:
    // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
    ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
    return static_cast<T>(1) << element;
908 // Bit field extraction.
// Bit field extraction.
// Extracts bits [lsb, msb] of x as an unsigned value.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  // Shift the field down to bit 0, then mask to its width (msb - lsb + 1).
  uint32_t shifted = x >> lsb;
  uint32_t mask = (1 << (1 + msb - lsb)) - 1;
  return shifted & mask;
}
// 64-bit version of unsigned_bitextract_32: extracts bits [lsb, msb].
inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  // The mask's 1 must be 64-bit so wide fields don't overflow the shift.
  uint64_t shifted = x >> lsb;
  uint64_t mask = (static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1;
  return shifted & mask;
}
// Extracts bits [lsb, msb] of x with sign extension from bit msb.
inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
  // Shift the field's top bit up to bit 31, then arithmetic-shift back
  // down so the field's sign bit fills the upper bits.
  int32_t aligned = x << (31 - msb);
  return aligned >> (lsb + 31 - msb);
}
inline int signed_bitextract_64(int msb, int lsb, int x) {
  // TODO(jbramley): This is broken for big bitfields.
  // NOTE(review): x is only an int, so for msb < 32 the shift count
  // exceeds the operand width (undefined behavior) — see TODO above.
  int aligned = x << (63 - msb);
  return aligned >> (lsb + 63 - msb);
}
// Check number width.
// Returns true if x is representable as an n-bit two's-complement value.
inline bool is_intn(int64_t x, unsigned n) {
  ASSERT((0 < n) && (n < 64));
  // An n-bit signed value spans [-2^(n-1), 2^(n-1)).
  int64_t limit = static_cast<int64_t>(1) << (n - 1);
  return (-limit <= x) && (x < limit);
// Returns true if x fits in n bits as an unsigned value.
inline bool is_uintn(int64_t x, unsigned n) {
  ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));

// Masks x down to its low n bits.
inline T truncate_to_intn(T x, unsigned n) {
  ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return (x & ((static_cast<T>(1) << n) - 1));
// X-macro listing the widths 1..63; used below to stamp out is_intN,
// is_uintN and truncate_to_intN helpers for every width.
#define INT_1_TO_63_LIST(V)                                                    \
V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)                                 \
V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)                                \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)                                \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)                                \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)                                \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)                                \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)                                \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define DECLARE_IS_INT_N(N)                                                    \
inline bool is_int##N(int64_t x) { return is_intn(x, N); }
#define DECLARE_IS_UINT_N(N)                                                   \
inline bool is_uint##N(T x) { return is_uintn(x, N); }
#define DECLARE_TRUNCATE_TO_INT_N(N)                                           \
inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
// Strongly-typed wrapper for an AST type-feedback slot id; kNoneId (-1)
// marks "no feedback slot".
class TypeFeedbackId {
  explicit TypeFeedbackId(int id) : id_(id) { }
  int ToInt() const { return id_; }

  static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
  bool IsNone() const { return id_ == kNoneId; }

  static const int kNoneId = -1;
  // Strongly-typed wrapper for a deoptimization/bailout point id.
  explicit BailoutId(int id) : id_(id) { }
  int ToInt() const { return id_; }

  // Named well-known ids (see the constants below).
  static BailoutId None() { return BailoutId(kNoneId); }
  static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
  static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
  static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
  static BailoutId StubEntry() { return BailoutId(kStubEntryId); }

  bool IsNone() const { return id_ == kNoneId; }
  bool operator==(const BailoutId& other) const { return id_ == other.id_; }
  bool operator!=(const BailoutId& other) const { return id_ != other.id_; }

  static const int kNoneId = -1;

  // Using 0 could disguise errors.
  static const int kFunctionEntryId = 2;

  // This AST id identifies the point after the declarations have been visited.
  // We need it to capture the environment effects of declarations that emit
  // code (function declarations).
  static const int kDeclarationsId = 3;

  // Every FunctionState starts with this id.
  static const int kFirstUsableId = 4;

  // Every compiled stub starts with this id.
  static const int kStubEntryId = 5;
// Adapts a container pointer so it can be used directly in range-based
// iteration contexts: forwards begin/end/rbegin/rend to the pointee.
class ContainerPointerWrapper {
  typedef typename C::iterator iterator;
  typedef typename C::reverse_iterator reverse_iterator;
  explicit ContainerPointerWrapper(C* container) : container_(container) {}
  iterator begin() { return container_->begin(); }
  iterator end() { return container_->end(); }
  reverse_iterator rbegin() { return container_->rbegin(); }
  reverse_iterator rend() { return container_->rend(); }
// ----------------------------------------------------------------------------

// On gcc we can ask the compiler to check the types of %d-style format
// specifiers and their associated arguments. TODO(erikcorry) fix this
// so it works on MacOSX.
#if defined(__MACH__) && defined(__APPLE__)
// MacOSX: checking disabled (see TODO above).
#define PRINTF_CHECKING
#define FPRINTF_CHECKING
#define PRINTF_METHOD_CHECKING
#define FPRINTF_METHOD_CHECKING
// GCC: the format attribute indexes are 1-based; the *_METHOD_* variants
// shift by one to skip the implicit 'this' argument, the F* variants to
// skip the leading FILE* argument.
#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3)))
#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4)))
// Other compilers: no checking.
#define PRINTF_CHECKING
#define FPRINTF_CHECKING
#define PRINTF_METHOD_CHECKING
#define FPRINTF_METHOD_CHECKING
// Our version of printf().
void PRINTF_CHECKING PrintF(const char* format, ...);
void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);

// Prepends the current process ID to the output.
void PRINTF_CHECKING PrintPID(const char* format, ...);

// Our version of fflush.
void Flush(FILE* out);

// Convenience overload flushing the default output stream.
inline void Flush() {

// Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed off with DeleteArray by the caller.
char* ReadLine(const char* prompt);
// Read and return the raw bytes in a file. the size of the buffer is returned
// The returned buffer must be freed by the caller.
byte* ReadBytes(const char* filename, int* size, bool verbose = true);

// Append size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int AppendChars(const char* filename,
                bool verbose = true);

// Write size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int WriteChars(const char* filename,
               bool verbose = true);

// Write size bytes to the file given by filename.
// The file is overwritten. Returns the number of bytes written.
int WriteBytes(const char* filename,
               bool verbose = true);

// Writes a C source snippet of the form
//   const char* <varname> = "<str>";
//   const int <varname>_len = <len>;
// to the file given by filename. Only the first len chars are written.
int WriteAsCFile(const char* filename, const char* varname,
                 const char* str, int size, bool verbose = true);
1118 // ----------------------------------------------------------------------------
// Reinterprets an array of Handle<T> as a Vector of Handle<Object>
// (handles all have the same representation, so the cast is shape-safe).
template <typename T>
inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
  return Vector< Handle<Object> >(
      reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
1129 // ----------------------------------------------------------------------------
1132 // Copies words from |src| to |dst|. The data spans must not overlap.
// Copies words from |src| to |dst|. The data spans must not overlap.
template <typename T>
inline void CopyWords(T* dst, const T* src, size_t num_words) {
  // T must be word-sized so num_words counts machine words.
  STATIC_ASSERT(sizeof(T) == kPointerSize);
  // Non-overlap precondition: the lower span must end before the higher
  // one begins.
  ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
         Max(dst, const_cast<T*>(src)));
  ASSERT(num_words > 0);

  // Use block copying OS::MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const size_t kBlockCopyLimit = 16;

  if (num_words < kBlockCopyLimit) {
    // Small copy: simple word-at-a-time loop (elided).
  } while (num_words > 0);
    OS::MemCopy(dst, src, num_words * kPointerSize);
1155 // Copies words from |src| to |dst|. No restrictions.
// Copies words from |src| to |dst|. No restrictions.
template <typename T>
inline void MoveWords(T* dst, const T* src, size_t num_words) {
  STATIC_ASSERT(sizeof(T) == kPointerSize);
  ASSERT(num_words > 0);

  // Use block copying OS::MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const size_t kBlockCopyLimit = 16;

  // NOTE(review): src is a T*, so pointer arithmetic already scales by
  // sizeof(T) == kPointerSize; multiplying num_words by kPointerSize here
  // appears to double-scale the overlap bound — confirm against intent.
  if (num_words < kBlockCopyLimit &&
      ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
    // Forward word-at-a-time copy is safe when dst precedes src or the
    // ranges are disjoint.
    T* end = dst + num_words;
    } while (num_words > 0);
    // Overlapping or large: defer to memmove-style primitive.
    OS::MemMove(dst, src, num_words * kPointerSize);
// Copies data from |src| to |dst|. The data spans must not overlap.
template <typename T>
inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
  // Byte-sized element type required so num_bytes counts elements.
  STATIC_ASSERT(sizeof(T) == 1);
  ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
         Max(dst, const_cast<T*>(src)));
  if (num_bytes == 0) return;

  // Use block copying OS::MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const int kBlockCopyLimit = OS::kMinComplexMemCopy;

  if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
    // Small copy: byte-at-a-time loop (elided).
  } while (num_bytes > 0);
    OS::MemCopy(dst, src, num_bytes);
// Fills counter pointer-sized slots of dest with value, using rep-stos
// inline assembly on x86/x64 where available.
template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
  // U* must be assignable to T* for the fill to be type-correct.
  a = b;  // Fake assignment to check assignability.

#if V8_HOST_ARCH_IA32
#define STOS "stosl"
#elif V8_HOST_ARCH_X64
#define STOS "stosq"
#if defined(__native_client__)
  // This STOS sequence does not validate for x86_64 Native Client.
  // Here we #undef STOS to force use of the slower C version.
  // TODO(bradchen): Profile V8 and implement a faster REP STOS
  // here if the profile indicates it matters.
#if defined(MEMORY_SANITIZER)
  // MemorySanitizer does not understand inline assembly.

#if defined(__GNUC__) && defined(STOS)
    // rep-stos clobbers (and updates) the count and destination registers.
    : "+&c" (counter), "+&D" (dest)
  // Portable fallback: plain store loop.
  for (int i = 0; i < counter; i++) {
// Simple wrapper that allows an ExternalString to refer to a
// Vector<const char>. Doesn't assume ownership of the data.
class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
  // Wraps |data| by value; the characters it points at are NOT copied,
  // so they must outlive this adapter.
  explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}

  // ExternalAsciiStringResource interface: raw pointer to the characters.
  virtual const char* data() const { return data_.start(); }

  // ExternalAsciiStringResource interface: number of characters.
  virtual size_t length() const { return data_.length(); }

  // The wrapped vector; non-owning.
  Vector<const char> data_;
// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exists tells whether the file existed.
// If |verbose| is true, failures are reported (presumably to stderr —
// confirm against the definition).
Vector<const char> ReadFile(const char* filename,
                            bool verbose = true);
// Overload reading from an already-open stdio stream.
Vector<const char> ReadFile(FILE* file,
                            bool verbose = true);
// Generic worker: copies characters from |src| to |dest|, converting
// between the unsigned source and sink character types element-wise.
template <typename sourcechar, typename sinkchar>
INLINE(static void CopyCharsUnsigned(sinkchar* dest,
                                     const sourcechar* src,
#if defined(V8_HOST_ARCH_ARM)
// ARM: hand-tuned non-template overloads (defined further below) for the
// 8-bit, 8->16-bit widening, and 16-bit combinations.
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#elif defined(V8_HOST_ARCH_MIPS)
// MIPS: specialized same-width overloads (defined further below).
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));

// Dispatches on the compile-time widths of the source and sink character
// types and forwards to the matching unsigned worker. The reinterpret_casts
// to uint8_t*/uint16_t* make the copy independent of the signedness of the
// caller's character types; each branch is resolved at compile time.
template<typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
  // Only 1- and 2-byte character types are supported.
  ASSERT(sizeof(sourcechar) <= 2);
  ASSERT(sizeof(sinkchar) <= 2);
  if (sizeof(sinkchar) == 1) {
    if (sizeof(sourcechar) == 1) {
      // 8-bit -> 8-bit copy.
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
      // 16-bit -> 8-bit (narrowing) copy.
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
    if (sizeof(sourcechar) == 1) {
      // 8-bit -> 16-bit (widening) copy.
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
      // 16-bit -> 16-bit copy.
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
// Portable worker: copies |chars| characters one at a time, with a
// machine-word-at-a-time fast path on hosts that tolerate unaligned reads.
template <typename sourcechar, typename sinkchar>
void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
  sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
  if (sizeof(*dest) == sizeof(*src)) {
    // Same-width copy: large segments go through the optimized block copy.
    if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
      OS::MemCopy(dest, src, chars * sizeof(*dest));
    // Number of characters in a uintptr_t.
    static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
    ASSERT(dest + kStepSize > dest); // Check for overflow.
    // Copy a full word per iteration while at least kStepSize characters
    // remain; relies on V8_HOST_CAN_READ_UNALIGNED for the casts below.
    while (dest + kStepSize <= limit) {
      *reinterpret_cast<uintptr_t*>(dest) =
          *reinterpret_cast<const uintptr_t*>(src);
  // Tail of the fast path, and the whole copy in the general
  // narrowing/widening case: one character at a time.
  while (dest < limit) {
    *dest++ = static_cast<sinkchar>(*src++);
#if defined(V8_HOST_ARCH_ARM)
// ARM-tuned 8-bit copy. Small counts are dispatched through a switch to
// fixed-size memcpy calls — a constant size lets the compiler expand each
// memcpy inline; larger counts fall through to OS::MemCopy.
// NOTE(review): each memcpy below sits under its own case label (not
// visible here) matching the byte count — confirm against the full file.
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
  switch (static_cast<unsigned>(chars)) {
      memcpy(dest, src, 2);
      memcpy(dest, src, 3);
      memcpy(dest, src, 4);
      memcpy(dest, src, 5);
      memcpy(dest, src, 6);
      memcpy(dest, src, 7);
      memcpy(dest, src, 8);
      memcpy(dest, src, 9);
      memcpy(dest, src, 10);
      memcpy(dest, src, 11);
      memcpy(dest, src, 12);
      memcpy(dest, src, 13);
      memcpy(dest, src, 14);
      memcpy(dest, src, 15);
      // 16 bytes or more: worth the block-copy call overhead.
      OS::MemCopy(dest, src, chars);
// ARM widening copy: 8-bit source characters to 16-bit destination slots.
void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
  // Large segments use the optimized converting block copy ...
  if (chars >= OS::kMinComplexConvertMemCopy) {
    OS::MemCopyUint16Uint8(dest, src, chars);
    // ... small ones go through the plain wrapper.
    OS::MemCopyUint16Uint8Wrapper(dest, src, chars);
// ARM-tuned 16-bit copy. Mirrors the 8-bit variant: small counts go to
// fixed-size inline-expandable memcpy calls (byte size = chars * 2),
// larger counts to OS::MemCopy.
// NOTE(review): each memcpy below sits under its own case label (not
// visible here) matching chars = bytes / 2 — confirm against the full file.
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
  switch (static_cast<unsigned>(chars)) {
      memcpy(dest, src, 4);
      memcpy(dest, src, 6);
      memcpy(dest, src, 8);
      memcpy(dest, src, 10);
      memcpy(dest, src, 12);
      memcpy(dest, src, 14);
      // Longer copies: size scaled to bytes for the block copy.
      OS::MemCopy(dest, src, chars * sizeof(*dest));
#elif defined(V8_HOST_ARCH_MIPS)
// MIPS 8-bit copy: plain memcpy below the complex-copy threshold,
// OS::MemCopy above it.
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
  if (chars < OS::kMinComplexMemCopy) {
    memcpy(dest, src, chars);
    OS::MemCopy(dest, src, chars);

// MIPS 16-bit copy, same structure as the 8-bit variant.
// NOTE(review): the threshold compares the CHARACTER count against
// kMinComplexMemCopy while the copy size is chars * 2 BYTES — possibly
// intentional (threshold in elements), but verify against the 8-bit case.
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
  if (chars < OS::kMinComplexMemCopy) {
    memcpy(dest, src, chars * sizeof(*dest));
    OS::MemCopy(dest, src, chars * sizeof(*dest));
// A SimpleStringBuilder extended with printf-style formatted append.
class StringBuilder : public SimpleStringBuilder {
  // Builder with an internally managed buffer of |size| characters
  // (allocation is handled by the SimpleStringBuilder base).
  explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
  // Builder writing into a caller-provided |buffer| of |size| characters.
  StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }

  // Add formatted contents to the builder just like printf().
  void AddFormatted(const char* format, ...);

  // Add formatted contents like printf based on a va_list.
  void AddFormattedList(const char* format, va_list list);

  // No default/copy construction or assignment.
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
1472 } } // namespace v8::internal
1474 #endif // V8_UTILS_H_