2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "platform/SharedBuffer.h"
30 #include "wtf/unicode/Unicode.h"
31 #include "wtf/unicode/UTF8.h"
33 #undef SHARED_BUFFER_STATS
35 #ifdef SHARED_BUFFER_STATS
36 #include "wtf/DataLog.h"
37 #include "wtf/MainThread.h"
// Resource bytes beyond the contiguous prefix are stored in fixed-size
// 4 KiB heap segments. The mask trick below relies on segmentSize being a
// power of two.
static const unsigned segmentSize = 0x1000;
static const unsigned segmentPositionMask = 0x0FFF;

// Index of the segment that holds byte `position` of the segmented region.
static inline unsigned segmentIndex(unsigned position)
{
    return position / segmentSize;
}

// Byte offset of `position` inside its segment.
static inline unsigned offsetInSegment(unsigned position)
{
    return position & segmentPositionMask;
}
57 static inline char* allocateSegment()
59 return static_cast<char*>(fastMalloc(segmentSize));
// Returns a segment obtained from allocateSegment() to the heap.
// NOTE(review): the body is elided in this listing — presumably fastFree(p);
// confirm against the full source.
62 static inline void freeSegment(char* p)
67 #ifdef SHARED_BUFFER_STATS
// Mutex guarding liveBuffers(); buffer creation/destruction can happen on
// multiple threads while printStats() runs on the main thread.
// NOTE(review): braces and the return of the static mutex are elided in this
// listing.
69 static Mutex& statsMutex()
71 DEFINE_STATIC_LOCAL(Mutex, mutex, ());
// Set of every SharedBuffer currently alive, consumed by printStats().
// Callers must hold statsMutex().
// NOTE(review): braces and the return statement are elided in this listing.
75 static HashSet<SharedBuffer*>& liveBuffers()
77 DEFINE_STATIC_LOCAL(HashSet<SharedBuffer*>, buffers, ());
81 static bool sizeComparator(SharedBuffer* a, SharedBuffer* b)
83 return a->size() > b->size();
// Builds a short printable preview (up to 64 bytes) of a buffer's contents
// for the stats dump; non-printable bytes are sanitized.
// NOTE(review): several lines (the `snippet`/`segment`/`offset` declarations,
// the loop `break`, the replacement character assignment, and the return) are
// elided in this listing — confirm against the full source.
86 static CString snippetForBuffer(SharedBuffer* sharedBuffer)
88 const unsigned kMaxSnippetLength = 64;
// Preview length is the smaller of the buffer size and the 64-byte cap.
90 unsigned snippetLength = std::min(sharedBuffer->size(), kMaxSnippetLength);
91 CString result = CString::newUninitialized(snippetLength, snippet);
// Walk the (possibly segmented) storage, copying into the snippet until the
// preview is full.
95 while (unsigned segmentLength = sharedBuffer->getSomeData(segment, offset)) {
96 unsigned length = std::min(segmentLength, snippetLength - offset);
97 memcpy(snippet + offset, segment, length);
98 offset += segmentLength;
99 if (offset >= snippetLength)
// Replace any non-printable bytes so the snippet is safe to log.
103 for (unsigned i = 0; i < snippetLength; ++i) {
104 if (!isASCIIPrintable(snippet[i]))
// Main-thread callback (scheduled by didCreateSharedBuffer) that logs the
// largest live SharedBuffers with a content snippet for each.
// NOTE(review): braces and loop closers are elided in this listing.
111 static void printStats(void*)
113 MutexLocker locker(statsMutex());
// Snapshot the live set into a vector so it can be sorted.
114 Vector<SharedBuffer*> buffers;
115 for (HashSet<SharedBuffer*>::const_iterator iter = liveBuffers().begin(); iter != liveBuffers().end(); ++iter)
116 buffers.append(*iter);
// Largest buffers first (see sizeComparator).
117 std::sort(buffers.begin(), buffers.end(), sizeComparator);
119 dataLogF("---- Shared Buffer Stats ----\n");
// Cap the report at the 64 largest buffers.
120 for (size_t i = 0; i < buffers.size() && i < 64; ++i) {
121 CString snippet = snippetForBuffer(buffers[i]);
122 dataLogF("Buffer size=%8u %s\n", buffers[i]->size(), snippet.data());
126 static void didCreateSharedBuffer(SharedBuffer* buffer)
128 MutexLocker locker(statsMutex());
129 liveBuffers().add(buffer);
131 callOnMainThread(printStats, 0);
134 static void willDestroySharedBuffer(SharedBuffer* buffer)
136 MutexLocker locker(statsMutex());
137 liveBuffers().remove(buffer);
// Default constructor: an empty, non-purgeable buffer.
// NOTE(review): the listing elides the leading member initializer (presumably
// m_size(0)) and braces — confirm against the full source.
142 SharedBuffer::SharedBuffer()
144 , m_buffer(PurgeableVector::NotPurgeable)
// Track the buffer for the optional stats instrumentation.
146 #ifdef SHARED_BUFFER_STATS
147 didCreateSharedBuffer(this);
// Creates an empty, non-purgeable buffer whose internal storage is
// pre-reserved to `size` bytes, avoiding reallocation during later appends.
// NOTE(review): the listing elides the leading member initializer and braces.
151 SharedBuffer::SharedBuffer(size_t size)
153 , m_buffer(PurgeableVector::NotPurgeable)
155 m_buffer.reserveCapacity(size);
157 #ifdef SHARED_BUFFER_STATS
158 didCreateSharedBuffer(this);
// Creates a non-purgeable buffer initialized with a copy of data[0..size).
// NOTE(review): the listing elides the size-validity check and the append of
// `data` — confirm against the full source.
162 SharedBuffer::SharedBuffer(const char* data, int size)
164 , m_buffer(PurgeableVector::NotPurgeable)
166 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
172 #ifdef SHARED_BUFFER_STATS
173 didCreateSharedBuffer(this);
// Same as the (const char*, int) constructor, but the caller chooses whether
// the backing PurgeableVector may be purged under memory pressure.
// NOTE(review): the listing elides the size-validity check and the append of
// `data` — confirm against the full source.
177 SharedBuffer::SharedBuffer(const char* data, int size, PurgeableVector::PurgeableOption purgeable)
179 , m_buffer(purgeable)
181 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
187 #ifdef SHARED_BUFFER_STATS
188 didCreateSharedBuffer(this);
// Convenience constructor for unsigned byte data: copies data[0..size) into
// a non-purgeable buffer via the char* append path.
// NOTE(review): the listing elides the leading member initializer and braces.
192 SharedBuffer::SharedBuffer(const unsigned char* data, int size)
194 , m_buffer(PurgeableVector::NotPurgeable)
196 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
200 append(reinterpret_cast<const char*>(data), size);
202 #ifdef SHARED_BUFFER_STATS
203 didCreateSharedBuffer(this);
// Destructor: releases storage and unregisters from the optional stats
// instrumentation.
// NOTE(review): the listing elides the body's cleanup (presumably clear()) —
// confirm against the full source.
207 SharedBuffer::~SharedBuffer()
211 #ifdef SHARED_BUFFER_STATS
212 willDestroySharedBuffer(this);
216 PassRefPtr<SharedBuffer> SharedBuffer::adoptVector(Vector<char>& vector)
218 RefPtr<SharedBuffer> buffer = create();
219 buffer->m_buffer.adopt(vector);
220 buffer->m_size = buffer->m_buffer.size();
221 return buffer.release();
// Total number of bytes stored (contiguous prefix plus all segments).
// NOTE(review): the body is elided in this listing — presumably returns
// m_size; confirm against the full source.
224 unsigned SharedBuffer::size() const
229 const char* SharedBuffer::data() const
231 mergeSegmentsIntoBuffer();
232 return m_buffer.data();
// Appends the full contents of another SharedBuffer by walking its (possibly
// segmented) storage with getSomeData().
// NOTE(review): the `segment`/`position` declarations and the position
// advance are elided in this listing — confirm against the full source.
235 void SharedBuffer::append(SharedBuffer* data)
239 while (size_t length = data->getSomeData(segment, position)) {
240 append(segment, length);
// Core append: small buffers grow the contiguous m_buffer; once the total
// exceeds one segment, additional bytes are written into fixed-size heap
// segments.
// NOTE(review): several lines (the empty-length early return, the
// `m_size += length` update, the `segment` declaration, the copy loop frame,
// and the `data` pointer advance) are elided in this listing — confirm
// against the full source.
245 void SharedBuffer::append(const char* data, unsigned length)
251 ASSERT(m_size >= m_buffer.size());
// Offset of the next free byte inside the last (partially filled) segment.
252 unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size());
255 if (m_size <= segmentSize) {
256 // No need to use segments for small resource data.
257 m_buffer.append(data, length);
// Start a fresh segment when the last one is exactly full; otherwise keep
// filling the last segment from its current offset.
262 if (!positionInSegment) {
263 segment = allocateSegment();
264 m_segments.append(segment);
266 segment = m_segments.last() + positionInSegment;
268 unsigned segmentFreeSpace = segmentSize - positionInSegment;
269 unsigned bytesToCopy = min(length, segmentFreeSpace);
// Copy loop: fill the current segment, then allocate new full segments
// until all of `length` has been written.
272 memcpy(segment, data, bytesToCopy);
273 if (static_cast<unsigned>(length) == bytesToCopy)
276 length -= bytesToCopy;
278 segment = allocateSegment();
279 m_segments.append(segment);
280 bytesToCopy = min(length, segmentSize);
284 void SharedBuffer::append(const Vector<char>& data)
286 append(data.data(), data.size());
// Releases all storage: every heap segment is freed back to the allocator.
// NOTE(review): the resets of m_segments, m_size and m_buffer are elided in
// this listing — confirm against the full source.
289 void SharedBuffer::clear()
291 for (unsigned i = 0; i < m_segments.size(); ++i)
292 freeSegment(m_segments[i]);
299 PassRefPtr<SharedBuffer> SharedBuffer::copy() const
301 RefPtr<SharedBuffer> clone(adoptRef(new SharedBuffer));
302 clone->m_size = m_size;
303 clone->m_buffer.reserveCapacity(m_size);
304 clone->m_buffer.append(m_buffer.data(), m_buffer.size());
305 if (!m_segments.isEmpty()) {
306 const char* segment = 0;
307 unsigned position = m_buffer.size();
308 while (unsigned segmentSize = getSomeData(segment, position)) {
309 clone->m_buffer.append(segment, segmentSize);
310 position += segmentSize;
312 ASSERT(position == clone->size());
314 return clone.release();
// Flattens all heap segments into the contiguous m_buffer (freeing each
// segment as it is copied) so data() can hand out one pointer. const but
// mutates mutable internal storage.
// NOTE(review): the final reset of m_segments and closing braces are elided
// in this listing — confirm against the full source.
317 void SharedBuffer::mergeSegmentsIntoBuffer() const
319 unsigned bufferSize = m_buffer.size();
// Nothing to do when all bytes already live in the contiguous buffer.
320 if (m_size > bufferSize) {
321 m_buffer.reserveCapacity(m_size);
322 unsigned bytesLeft = m_size - bufferSize;
323 for (unsigned i = 0; i < m_segments.size(); ++i) {
// The last segment may be partially filled, hence the min().
324 unsigned bytesToCopy = min(bytesLeft, segmentSize);
325 m_buffer.append(m_segments[i], bytesToCopy);
326 bytesLeft -= bytesToCopy;
327 freeSegment(m_segments[i]);
// Iterator-style accessor: points `someData` at the longest contiguous run
// starting at `position` and returns its length (0 when past the end).
// Callers advance `position` by the returned length to walk the buffer.
// NOTE(review): the out-of-range early-return body and the final return are
// elided in this listing — confirm against the full source.
333 unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const
336 unsigned totalSize = size();
337 if (position >= totalSize) {
342 ASSERT_WITH_SECURITY_IMPLICATION(position < m_size);
// Fast path: the requested position lies inside the contiguous prefix.
343 unsigned consecutiveSize = m_buffer.size();
344 if (position < consecutiveSize) {
345 someData = m_buffer.data() + position;
346 return consecutiveSize - position;
// Otherwise translate to a position within the segmented region.
349 position -= consecutiveSize;
350 unsigned segments = m_segments.size();
351 unsigned maxSegmentedSize = segments * segmentSize;
352 unsigned segment = segmentIndex(position);
353 if (segment < segments) {
354 unsigned bytesLeft = totalSize - consecutiveSize;
355 unsigned segmentedSize = min(maxSegmentedSize, bytesLeft);
357 unsigned positionInSegment = offsetInSegment(position);
358 someData = m_segments[segment] + positionInSegment;
// The last segment may be partially filled; full segments return the bytes
// remaining to their end.
359 return segment == segments - 1 ? segmentedSize - position : segmentSize - positionInSegment;
361 ASSERT_NOT_REACHED();
// Copies the buffer's full contents into a newly allocated ArrayBuffer.
// NOTE(review): the allocation-failure check, the success return, and the
// failure return are elided in this listing — confirm against the full
// source.
365 PassRefPtr<ArrayBuffer> SharedBuffer::getAsArrayBuffer() const
367 RefPtr<ArrayBuffer> arrayBuffer = ArrayBuffer::createUninitialized(static_cast<unsigned>(size()), 1);
// Walk the (possibly segmented) storage, copying chunk by chunk.
372 const char* segment = 0;
373 unsigned position = 0;
374 while (unsigned segmentSize = getSomeData(segment, position)) {
375 memcpy(static_cast<char*>(arrayBuffer->data()) + position, segment, segmentSize);
376 position += segmentSize;
// Sanity check: the walk must have covered the whole destination.
379 if (position != arrayBuffer->byteLength()) {
380 ASSERT_NOT_REACHED();
381 // Don't return the incomplete ArrayBuffer.
// Copies the buffer's full contents into a malloc'ed block and wraps it in
// an SkData, which takes ownership of the allocation.
// NOTE(review): the failure path's cleanup (presumably freeing `buffer`) and
// its return are elided in this listing — confirm against the full source.
388 PassRefPtr<SkData> SharedBuffer::getAsSkData() const
390 unsigned bufferLength = size();
391 char* buffer = static_cast<char*>(sk_malloc_throw(bufferLength));
// Walk the (possibly segmented) storage, copying chunk by chunk.
392 const char* segment = 0;
393 unsigned position = 0;
394 while (unsigned segmentSize = getSomeData(segment, position)) {
395 memcpy(buffer + position, segment, segmentSize);
396 position += segmentSize;
// Sanity check: the walk must have covered the whole destination.
399 if (position != bufferLength) {
400 ASSERT_NOT_REACHED();
401 // Don't return the incomplete SkData.
// SkData::NewFromMalloc assumes ownership of `buffer`.
404 return adoptRef(SkData::NewFromMalloc(buffer, bufferLength));
407 bool SharedBuffer::lock()
409 return m_buffer.lock();
// Unlocks the buffer, first merging any heap segments into the contiguous
// PurgeableVector — presumably so all bytes are subject to purging; confirm
// against the full source.
// NOTE(review): the m_buffer.unlock() call is elided in this listing.
412 void SharedBuffer::unlock()
414 mergeSegmentsIntoBuffer();
418 bool SharedBuffer::isLocked() const
420 return m_buffer.isLocked();
423 } // namespace WebCore