2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "platform/SharedBuffer.h"
30 #include "wtf/unicode/Unicode.h"
31 #include "wtf/unicode/UTF8.h"
33 #undef SHARED_BUFFER_STATS
35 #ifdef SHARED_BUFFER_STATS
36 #include "wtf/DataLog.h"
37 #include "wtf/MainThread.h"
// Big buffers are stored as a chain of fixed 4 KiB segments instead of one
// contiguous allocation. These file-local helpers translate an absolute byte
// position into (segment index, offset within segment).
static const unsigned segmentSize = 0x1000;
static const unsigned segmentPositionMask = 0x0FFF;

// Which segment holds the byte at |position|.
static inline unsigned segmentIndex(unsigned position)
{
    return position / segmentSize;
}

// Offset of |position| inside its segment (low 12 bits).
static inline unsigned offsetInSegment(unsigned position)
{
    return position & segmentPositionMask;
}
55 static inline char* allocateSegment()
57 return static_cast<char*>(fastMalloc(segmentSize));
// Releases a segment previously obtained from allocateSegment().
// NOTE(review): the body (presumably "fastFree(p);") is not visible in this
// excerpt — confirm against the full file.
static inline void freeSegment(char* p)
65 #ifdef SHARED_BUFFER_STATS
// Lazily-created mutex guarding the liveBuffers() set; buffers may be created
// and destroyed on any thread. Debug-only (#ifdef SHARED_BUFFER_STATS).
// NOTE(review): the "return mutex;" line is not visible in this excerpt.
static Mutex& statsMutex()
    DEFINE_STATIC_LOCAL(Mutex, mutex, ());
// Set of all currently-live SharedBuffers, used only for the stats dump.
// Must be accessed while holding statsMutex().
// NOTE(review): the "return buffers;" line is not visible in this excerpt.
static HashSet<SharedBuffer*>& liveBuffers()
    DEFINE_STATIC_LOCAL(HashSet<SharedBuffer*>, buffers, ());
79 static bool sizeComparator(SharedBuffer* a, SharedBuffer* b)
81 return a->size() > b->size();
// Builds a short preview (at most 64 bytes) of a buffer's content for the
// stats dump. NOTE(review): several lines are missing from this excerpt —
// the declarations of 'snippet', 'segment' and 'offset', the loop 'break',
// and the statement replacing non-printable bytes. Confirm against the full
// file before editing.
static CString snippetForBuffer(SharedBuffer* sharedBuffer)
    const unsigned kMaxSnippetLength = 64;
    unsigned snippetLength = std::min(sharedBuffer->size(), kMaxSnippetLength);
    // 'snippet' receives a pointer to the CString's uninitialized storage.
    CString result = CString::newUninitialized(snippetLength, snippet);
    // Walk the buffer segment by segment, copying until the snippet is full.
    while (unsigned segmentLength = sharedBuffer->getSomeData(segment, offset)) {
        // Clamp the copy so we never write past the snippet.
        unsigned length = std::min(segmentLength, snippetLength - offset);
        memcpy(snippet + offset, segment, length);
        offset += segmentLength;
        if (offset >= snippetLength)
    // Sanitize: only ASCII-printable bytes are kept as-is in the preview.
    for (unsigned i = 0; i < snippetLength; ++i) {
        if (!isASCIIPrintable(snippet[i]))
// Debug-only: logs up to the 64 largest live SharedBuffers (size plus a short
// content snippet), sorted largest-first. Runs on the main thread via
// callOnMainThread; the void* parameter is the unused callback payload.
static void printStats(void*)
    // Snapshot the live-buffer set under the stats mutex for the whole dump.
    MutexLocker locker(statsMutex());
    Vector<SharedBuffer*> buffers;
    for (HashSet<SharedBuffer*>::const_iterator iter = liveBuffers().begin(); iter != liveBuffers().end(); ++iter)
        buffers.append(*iter);
    std::sort(buffers.begin(), buffers.end(), sizeComparator); // largest first
    dataLogF("---- Shared Buffer Stats ----\n");
    for (size_t i = 0; i < buffers.size() && i < 64; ++i) {
        CString snippet = snippetForBuffer(buffers[i]);
        dataLogF("Buffer size=%8u %s\n", buffers[i]->size(), snippet.data());
// Debug-only hook invoked from every SharedBuffer constructor: registers the
// buffer in the live set and schedules a stats dump on the main thread.
static void didCreateSharedBuffer(SharedBuffer* buffer)
    MutexLocker locker(statsMutex());
    liveBuffers().add(buffer);

    // Dump is deferred to the main thread; safe to call from any thread.
    callOnMainThread(printStats, 0);
// Debug-only hook invoked from the destructor: removes the buffer from the
// live set so printStats never dereferences a dead pointer.
static void willDestroySharedBuffer(SharedBuffer* buffer)
    MutexLocker locker(statsMutex());
    liveBuffers().remove(buffer);
// Constructs an empty, non-purgeable SharedBuffer.
// NOTE(review): the leading initializer entry (presumably ": m_size(0)") is
// not visible in this excerpt — confirm against the full file.
SharedBuffer::SharedBuffer()
    , m_buffer(PurgeableVector::NotPurgeable)
#ifdef SHARED_BUFFER_STATS
    // Debug-only bookkeeping for the stats dump.
    didCreateSharedBuffer(this);
// Constructs a non-purgeable buffer, pre-reserving capacity for |size| bytes
// so later appends up to that size do not reallocate.
// NOTE(review): the leading initializer entry (m_size) is not visible in this
// excerpt — confirm against the full file.
SharedBuffer::SharedBuffer(size_t size)
    , m_buffer(PurgeableVector::NotPurgeable)
    m_buffer.reserveCapacity(size);

#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
// Constructs a non-purgeable buffer initialized from |data|/|size|.
// NOTE(review): the initializer-list head and the copying statement
// (presumably an append of data/size) are not visible in this excerpt.
SharedBuffer::SharedBuffer(const char* data, int size)
    , m_buffer(PurgeableVector::NotPurgeable)
    // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.

#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
// Constructs a buffer initialized from |data|/|size|, with purgeability of
// the backing store chosen by |purgeable|.
// NOTE(review): the initializer-list head and the copying statement are not
// visible in this excerpt.
SharedBuffer::SharedBuffer(const char* data, unsigned size, PurgeableVector::PurgeableOption purgeable)
    , m_buffer(purgeable)

#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
// Convenience overload for unsigned char data: copies |size| bytes in via the
// char* append path.
// NOTE(review): the initializer-list head (m_size) is not visible in this
// excerpt.
SharedBuffer::SharedBuffer(const unsigned char* data, int size)
    , m_buffer(PurgeableVector::NotPurgeable)
    // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
    append(reinterpret_cast<const char*>(data), size);

#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
// Destructor. NOTE(review): the cleanup statement (presumably "clear();",
// which frees the segment chain) is not visible in this excerpt — confirm.
SharedBuffer::~SharedBuffer()

#ifdef SHARED_BUFFER_STATS
    // Unregister from the debug live-buffer set before memory goes away.
    willDestroySharedBuffer(this);
210 PassRefPtr<SharedBuffer> SharedBuffer::adoptVector(Vector<char>& vector)
212 RefPtr<SharedBuffer> buffer = create();
213 buffer->m_buffer.adopt(vector);
214 buffer->m_size = buffer->m_buffer.size();
215 return buffer.release();
// Total number of bytes held (contiguous m_buffer plus the segment chain).
// NOTE(review): the body (presumably "return m_size;") is not visible in this
// excerpt.
unsigned SharedBuffer::size() const
// Returns the buffer's bytes as one contiguous run. This forces all segments
// to be merged into m_buffer first (freeing the segments), so the first call
// after segmented appends pays a copy.
const char* SharedBuffer::data() const
    mergeSegmentsIntoBuffer();
    return m_buffer.data();
// Appends another SharedBuffer's entire contents, one segment at a time, via
// getSomeData(). NOTE(review): the declarations of 'segment' and 'position'
// (and the position advance) are not visible in this excerpt.
void SharedBuffer::append(PassRefPtr<SharedBuffer> data)
    while (size_t length = data->getSomeData(segment, position)) {
        append(segment, length);
// Appends |length| bytes from |data|. Small buffers (total <= segmentSize)
// stay contiguous in m_buffer; beyond that, data goes into the 4 KiB segment
// chain. NOTE(review): several lines are missing from this excerpt (early
// return, the m_size update, the 'segment' declaration, the 'else', the copy
// loop header, the 'break' and the 'data' pointer advance) — confirm against
// the full file before editing.
void SharedBuffer::append(const char* data, unsigned length)
    ASSERT(m_size >= m_buffer.size());
    // Where the next byte lands inside the last (partial) segment.
    unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size());

    if (m_size <= segmentSize) {
        // No need to use segments for small resource data.
        m_buffer.append(data, length);

    // Start a fresh segment when the previous one is exactly full.
    if (!positionInSegment) {
        segment = allocateSegment();
        m_segments.append(segment);
        segment = m_segments.last() + positionInSegment;

    unsigned segmentFreeSpace = segmentSize - positionInSegment;
    unsigned bytesToCopy = std::min(length, segmentFreeSpace);

        memcpy(segment, data, bytesToCopy);
        if (static_cast<unsigned>(length) == bytesToCopy)

        length -= bytesToCopy;
        // Chain further whole segments for the remaining bytes.
        segment = allocateSegment();
        m_segments.append(segment);
        bytesToCopy = std::min(length, segmentSize);
// Convenience overload: appends the whole vector's contents.
void SharedBuffer::append(const Vector<char>& data)
    append(data.data(), data.size());
// Frees every segment in the chain. NOTE(review): the trailing statements
// (presumably clearing m_segments, resetting m_size, and clearing m_buffer)
// are not visible in this excerpt — confirm against the full file.
void SharedBuffer::clear()
    for (unsigned i = 0; i < m_segments.size(); ++i)
        freeSegment(m_segments[i]);
// Deep copy: returns a new SharedBuffer with identical contents, flattened
// into the clone's contiguous m_buffer. Segments of this buffer are read via
// getSomeData() without being modified or freed.
PassRefPtr<SharedBuffer> SharedBuffer::copy() const
    RefPtr<SharedBuffer> clone(adoptRef(new SharedBuffer));
    clone->m_size = m_size;
    clone->m_buffer.reserveCapacity(m_size); // one allocation up front
    clone->m_buffer.append(m_buffer.data(), m_buffer.size());
    if (!m_segments.isEmpty()) {
        const char* segment = 0;
        unsigned position = m_buffer.size();
        // Note: this local 'segmentSize' shadows the file-static constant.
        while (unsigned segmentSize = getSomeData(segment, position)) {
            clone->m_buffer.append(segment, segmentSize);
            position += segmentSize;
        ASSERT(position == clone->size());
    return clone.release();
// Flattens the segment chain into the contiguous m_buffer, in order, freeing
// each segment as it is copied. Declared const but mutates m_buffer and the
// segments — presumably those members are mutable; confirm against the class
// definition. NOTE(review): the statement clearing m_segments afterwards is
// not visible in this excerpt.
void SharedBuffer::mergeSegmentsIntoBuffer() const
    unsigned bufferSize = m_buffer.size();
    if (m_size > bufferSize) {
        m_buffer.reserveCapacity(m_size);
        unsigned bytesLeft = m_size - bufferSize;
        for (unsigned i = 0; i < m_segments.size(); ++i) {
            // The last segment may be partially filled; copy only bytesLeft.
            unsigned bytesToCopy = std::min(bytesLeft, segmentSize);
            m_buffer.append(m_segments[i], bytesToCopy);
            bytesLeft -= bytesToCopy;
            freeSegment(m_segments[i]);
// Returns a pointer (via |someData|) to the longest contiguous run of bytes
// starting at |position|, and its length as the return value; returns 0 when
// |position| is at or past the end. Callers iterate by advancing |position|
// by the returned length. NOTE(review): several lines (the null-out/early
// return in the first branch, closing braces, and the final "return 0;") are
// not visible in this excerpt.
unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const
    unsigned totalSize = size();
    if (position >= totalSize) {
    ASSERT_WITH_SECURITY_IMPLICATION(position < m_size);
    unsigned consecutiveSize = m_buffer.size();
    // Case 1: the position falls inside the contiguous head buffer.
    if (position < consecutiveSize) {
        someData = m_buffer.data() + position;
        return consecutiveSize - position;

    // Case 2: the position falls inside the segment chain; rebase it so it
    // is relative to the start of the segments.
    position -= consecutiveSize;
    unsigned segments = m_segments.size();
    unsigned maxSegmentedSize = segments * segmentSize;
    unsigned segment = segmentIndex(position);
    if (segment < segments) {
        unsigned bytesLeft = totalSize - consecutiveSize;
        unsigned segmentedSize = std::min(maxSegmentedSize, bytesLeft);
        unsigned positionInSegment = offsetInSegment(position);
        someData = m_segments[segment] + positionInSegment;
        // The last segment may be partially filled, so clamp its length.
        return segment == segments - 1 ? segmentedSize - position : segmentSize - positionInSegment;
    ASSERT_NOT_REACHED();
// Copies the whole buffer into a freshly allocated ArrayBuffer by walking it
// with getSomeData(). NOTE(review): the success-path "return arrayBuffer"
// and the error-path return are not visible in this excerpt.
PassRefPtr<ArrayBuffer> SharedBuffer::getAsArrayBuffer() const
    RefPtr<ArrayBuffer> arrayBuffer = ArrayBuffer::createUninitialized(static_cast<unsigned>(size()), 1);

    const char* segment = 0;
    unsigned position = 0;
    // Note: this local 'segmentSize' shadows the file-static constant.
    while (unsigned segmentSize = getSomeData(segment, position)) {
        memcpy(static_cast<char*>(arrayBuffer->data()) + position, segment, segmentSize);
        position += segmentSize;

    if (position != arrayBuffer->byteLength()) {
        ASSERT_NOT_REACHED();
        // Don't return the incomplete ArrayBuffer.
// Copies the whole buffer into a freshly allocated SkData by walking it with
// getSomeData(); ownership transfers via adoptRef. NOTE(review): the
// error-path return is not visible in this excerpt — check in the full file
// whether 'data' is unref'd on that path, otherwise the SkData leaks.
PassRefPtr<SkData> SharedBuffer::getAsSkData() const
    unsigned bufferLength = size();
    SkData* data = SkData::NewUninitialized(bufferLength);
    char* buffer = static_cast<char*>(data->writable_data());
    const char* segment = 0;
    unsigned position = 0;
    // Note: this local 'segmentSize' shadows the file-static constant.
    while (unsigned segmentSize = getSomeData(segment, position)) {
        memcpy(buffer + position, segment, segmentSize);
        position += segmentSize;

    if (position != bufferLength) {
        ASSERT_NOT_REACHED();
        // Don't return the incomplete SkData.
    return adoptRef(data);
// Pins the purgeable backing store in memory; returns false if the data was
// already discarded by the system. Delegates to PurgeableVector.
bool SharedBuffer::lock()
    return m_buffer.lock();
// Allows the backing store to be purged again. Segments are merged into the
// contiguous buffer first, so all bytes live in the purgeable vector.
// NOTE(review): the "m_buffer.unlock();" call is not visible in this excerpt.
void SharedBuffer::unlock()
    mergeSegmentsIntoBuffer();
// True while the purgeable backing store is pinned (safe to read its bytes).
bool SharedBuffer::isLocked() const
    return m_buffer.isLocked();