/*
 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28 #include "platform/SharedBuffer.h"
30 #include "wtf/unicode/Unicode.h"
31 #include "wtf/unicode/UTF8.h"
33 #undef SHARED_BUFFER_STATS
35 #ifdef SHARED_BUFFER_STATS
36 #include "wtf/DataLog.h"
37 #include "wtf/MainThread.h"
// Once a SharedBuffer outgrows its contiguous vector, additional data is
// stored in fixed-size 4 KiB segments. segmentPositionMask must equal
// segmentSize - 1 (segmentSize is a power of two) so offsetInSegment() can
// use a bitwise AND instead of a modulo.
static const unsigned segmentSize = 0x1000;
static const unsigned segmentPositionMask = 0x0FFF;
45 static inline unsigned segmentIndex(unsigned position)
47 return position / segmentSize;
50 static inline unsigned offsetInSegment(unsigned position)
52 return position & segmentPositionMask;
55 static inline char* allocateSegment()
57 return static_cast<char*>(fastMalloc(segmentSize));
60 static inline void freeSegment(char* p)
65 #ifdef SHARED_BUFFER_STATS
67 static Mutex& statsMutex()
69 DEFINE_STATIC_LOCAL(Mutex, mutex, ());
73 static HashSet<SharedBuffer*>& liveBuffers()
75 DEFINE_STATIC_LOCAL(HashSet<SharedBuffer*>, buffers, ());
79 static bool sizeComparator(SharedBuffer* a, SharedBuffer* b)
81 return a->size() > b->size();
84 static CString snippetForBuffer(SharedBuffer* sharedBuffer)
86 const unsigned kMaxSnippetLength = 64;
88 unsigned snippetLength = std::min(sharedBuffer->size(), kMaxSnippetLength);
89 CString result = CString::newUninitialized(snippetLength, snippet);
93 while (unsigned segmentLength = sharedBuffer->getSomeData(segment, offset)) {
94 unsigned length = std::min(segmentLength, snippetLength - offset);
95 memcpy(snippet + offset, segment, length);
96 offset += segmentLength;
97 if (offset >= snippetLength)
101 for (unsigned i = 0; i < snippetLength; ++i) {
102 if (!isASCIIPrintable(snippet[i]))
109 static void printStats(void*)
111 MutexLocker locker(statsMutex());
112 Vector<SharedBuffer*> buffers;
113 for (HashSet<SharedBuffer*>::const_iterator iter = liveBuffers().begin(); iter != liveBuffers().end(); ++iter)
114 buffers.append(*iter);
115 std::sort(buffers.begin(), buffers.end(), sizeComparator);
117 dataLogF("---- Shared Buffer Stats ----\n");
118 for (size_t i = 0; i < buffers.size() && i < 64; ++i) {
119 CString snippet = snippetForBuffer(buffers[i]);
120 dataLogF("Buffer size=%8u %s\n", buffers[i]->size(), snippet.data());
124 static void didCreateSharedBuffer(SharedBuffer* buffer)
126 MutexLocker locker(statsMutex());
127 liveBuffers().add(buffer);
129 callOnMainThread(printStats, 0);
132 static void willDestroySharedBuffer(SharedBuffer* buffer)
134 MutexLocker locker(statsMutex());
135 liveBuffers().remove(buffer);
140 SharedBuffer::SharedBuffer()
142 , m_buffer(PurgeableVector::NotPurgeable)
144 #ifdef SHARED_BUFFER_STATS
145 didCreateSharedBuffer(this);
149 SharedBuffer::SharedBuffer(size_t size)
151 , m_buffer(PurgeableVector::NotPurgeable)
153 m_buffer.reserveCapacity(size);
155 #ifdef SHARED_BUFFER_STATS
156 didCreateSharedBuffer(this);
160 SharedBuffer::SharedBuffer(const char* data, int size)
162 , m_buffer(PurgeableVector::NotPurgeable)
164 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
170 #ifdef SHARED_BUFFER_STATS
171 didCreateSharedBuffer(this);
175 SharedBuffer::SharedBuffer(const char* data, int size, PurgeableVector::PurgeableOption purgeable)
177 , m_buffer(purgeable)
179 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
185 #ifdef SHARED_BUFFER_STATS
186 didCreateSharedBuffer(this);
190 SharedBuffer::SharedBuffer(const unsigned char* data, int size)
192 , m_buffer(PurgeableVector::NotPurgeable)
194 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
198 append(reinterpret_cast<const char*>(data), size);
200 #ifdef SHARED_BUFFER_STATS
201 didCreateSharedBuffer(this);
205 SharedBuffer::~SharedBuffer()
209 #ifdef SHARED_BUFFER_STATS
210 willDestroySharedBuffer(this);
214 PassRefPtr<SharedBuffer> SharedBuffer::adoptVector(Vector<char>& vector)
216 RefPtr<SharedBuffer> buffer = create();
217 buffer->m_buffer.adopt(vector);
218 buffer->m_size = buffer->m_buffer.size();
219 return buffer.release();
222 unsigned SharedBuffer::size() const
227 const char* SharedBuffer::data() const
229 mergeSegmentsIntoBuffer();
230 return m_buffer.data();
233 void SharedBuffer::append(PassRefPtr<SharedBuffer> data)
237 while (size_t length = data->getSomeData(segment, position)) {
238 append(segment, length);
243 void SharedBuffer::append(const char* data, unsigned length)
249 ASSERT(m_size >= m_buffer.size());
250 unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size());
253 if (m_size <= segmentSize) {
254 // No need to use segments for small resource data.
255 m_buffer.append(data, length);
260 if (!positionInSegment) {
261 segment = allocateSegment();
262 m_segments.append(segment);
264 segment = m_segments.last() + positionInSegment;
266 unsigned segmentFreeSpace = segmentSize - positionInSegment;
267 unsigned bytesToCopy = std::min(length, segmentFreeSpace);
270 memcpy(segment, data, bytesToCopy);
271 if (static_cast<unsigned>(length) == bytesToCopy)
274 length -= bytesToCopy;
276 segment = allocateSegment();
277 m_segments.append(segment);
278 bytesToCopy = std::min(length, segmentSize);
282 void SharedBuffer::append(const Vector<char>& data)
284 append(data.data(), data.size());
287 void SharedBuffer::clear()
289 for (unsigned i = 0; i < m_segments.size(); ++i)
290 freeSegment(m_segments[i]);
297 PassRefPtr<SharedBuffer> SharedBuffer::copy() const
299 RefPtr<SharedBuffer> clone(adoptRef(new SharedBuffer));
300 clone->m_size = m_size;
301 clone->m_buffer.reserveCapacity(m_size);
302 clone->m_buffer.append(m_buffer.data(), m_buffer.size());
303 if (!m_segments.isEmpty()) {
304 const char* segment = 0;
305 unsigned position = m_buffer.size();
306 while (unsigned segmentSize = getSomeData(segment, position)) {
307 clone->m_buffer.append(segment, segmentSize);
308 position += segmentSize;
310 ASSERT(position == clone->size());
312 return clone.release();
315 void SharedBuffer::mergeSegmentsIntoBuffer() const
317 unsigned bufferSize = m_buffer.size();
318 if (m_size > bufferSize) {
319 m_buffer.reserveCapacity(m_size);
320 unsigned bytesLeft = m_size - bufferSize;
321 for (unsigned i = 0; i < m_segments.size(); ++i) {
322 unsigned bytesToCopy = std::min(bytesLeft, segmentSize);
323 m_buffer.append(m_segments[i], bytesToCopy);
324 bytesLeft -= bytesToCopy;
325 freeSegment(m_segments[i]);
331 unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const
334 unsigned totalSize = size();
335 if (position >= totalSize) {
340 ASSERT_WITH_SECURITY_IMPLICATION(position < m_size);
341 unsigned consecutiveSize = m_buffer.size();
342 if (position < consecutiveSize) {
343 someData = m_buffer.data() + position;
344 return consecutiveSize - position;
347 position -= consecutiveSize;
348 unsigned segments = m_segments.size();
349 unsigned maxSegmentedSize = segments * segmentSize;
350 unsigned segment = segmentIndex(position);
351 if (segment < segments) {
352 unsigned bytesLeft = totalSize - consecutiveSize;
353 unsigned segmentedSize = std::min(maxSegmentedSize, bytesLeft);
355 unsigned positionInSegment = offsetInSegment(position);
356 someData = m_segments[segment] + positionInSegment;
357 return segment == segments - 1 ? segmentedSize - position : segmentSize - positionInSegment;
359 ASSERT_NOT_REACHED();
363 PassRefPtr<ArrayBuffer> SharedBuffer::getAsArrayBuffer() const
365 RefPtr<ArrayBuffer> arrayBuffer = ArrayBuffer::createUninitialized(static_cast<unsigned>(size()), 1);
370 const char* segment = 0;
371 unsigned position = 0;
372 while (unsigned segmentSize = getSomeData(segment, position)) {
373 memcpy(static_cast<char*>(arrayBuffer->data()) + position, segment, segmentSize);
374 position += segmentSize;
377 if (position != arrayBuffer->byteLength()) {
378 ASSERT_NOT_REACHED();
379 // Don't return the incomplete ArrayBuffer.
386 PassRefPtr<SkData> SharedBuffer::getAsSkData() const
388 unsigned bufferLength = size();
389 SkData* data = SkData::NewUninitialized(bufferLength);
390 char* buffer = static_cast<char*>(data->writable_data());
391 const char* segment = 0;
392 unsigned position = 0;
393 while (unsigned segmentSize = getSomeData(segment, position)) {
394 memcpy(buffer + position, segment, segmentSize);
395 position += segmentSize;
398 if (position != bufferLength) {
399 ASSERT_NOT_REACHED();
400 // Don't return the incomplete SkData.
403 return adoptRef(data);
406 bool SharedBuffer::lock()
408 return m_buffer.lock();
411 void SharedBuffer::unlock()
413 mergeSegmentsIntoBuffer();
417 bool SharedBuffer::isLocked() const
419 return m_buffer.isLocked();