1 /****************************************************************************
3 ** Copyright (C) 2015 The Qt Company Ltd.
4 ** Contact: http://www.qt.io/licensing/
6 ** This file is part of the QtQml module of the Qt Toolkit.
8 ** $QT_BEGIN_LICENSE:LGPL21$
9 ** Commercial License Usage
10 ** Licensees holding valid commercial Qt licenses may use this file in
11 ** accordance with the commercial license agreement provided with the
12 ** Software or, alternatively, in accordance with the terms contained in
13 ** a written agreement between you and The Qt Company. For licensing terms
14 ** and conditions see http://www.qt.io/terms-conditions. For further
15 ** information use the contact form at http://www.qt.io/contact-us.
17 ** GNU Lesser General Public License Usage
18 ** Alternatively, this file may be used under the terms of the GNU Lesser
19 ** General Public License version 2.1 or version 3 as published by the Free
20 ** Software Foundation and appearing in the file LICENSE.LGPLv21 and
21 ** LICENSE.LGPLv3 included in the packaging of this file. Please review the
22 ** following information to ensure the GNU Lesser General Public License
23 ** requirements will be met: https://www.gnu.org/licenses/lgpl.html and
24 ** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
26 ** As a special exception, The Qt Company gives you certain additional
27 ** rights. These rights are described in The Qt Company LGPL Exception
28 ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
32 ****************************************************************************/
34 #include "qv4engine_p.h"
35 #include "qv4object_p.h"
36 #include "qv4objectproto_p.h"
38 #include "qv4qobjectwrapper_p.h"
39 #include <qqmlengine.h>
40 #include "PageAllocation.h"
41 #include "StdLibExtras.h"
51 #include "qv4alloca_p.h"
52 #include "qv4profiling_p.h"
54 #ifdef V4_USE_VALGRIND
55 #include <valgrind/valgrind.h>
56 #include <valgrind/memcheck.h>
60 #include <sys/storage.h> // __tls()
63 #if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
64 #include <pthread_np.h>
// Internal state of the V4 memory manager: per-size-bucket small-object chunk
// lists, the malloc-backed large-item list, and GC tuning/statistics.
// NOTE(review): this view of the file is sampled — lines are elided between the
// numbered fragments below; the struct body as shown is incomplete.
73 struct MemoryManager::Data
// Link to the next chunk (same item size) that still has free slots.
77 ChunkHeader *nextNonFull;
86 ExecutionEngine *engine;
// Small-object buckets: sizes up to 512 bytes, in 16-byte granules,
// giving MaxItemSize/16 buckets for each of the tables below.
88 enum { MaxItemSize = 512 };
89 ChunkHeader *nonFullChunks[MaxItemSize/16];
90 uint nChunks[MaxItemSize/16];
91 uint availableItems[MaxItemSize/16];
92 uint allocCount[MaxItemSize/16];
96 std::size_t maxChunkSize;
97 QVector<PageAllocation> heapChunks;
// LargeItem helper: the GC-visible object starts at the payload that
// immediately follows this header.
104 Heap::Base *heapObject() {
105 return reinterpret_cast<Heap::Base *>(&data);
109 LargeItem *largeItems;
110 std::size_t totalLargeItemsAllocated;
112 GCDeletable *deletable;
115 #ifdef DETAILED_MM_STATS
116 QVector<unsigned> allocSizeCounters;
117 #endif // DETAILED_MM_STATS
// Constructor fragment: zero the per-bucket tables, then read GC tuning
// from the environment (QV4_MM_AGGRESSIVE_GC, QV4_MM_STATS,
// QV4_MM_MAXBLOCK_SHIFT, QV4_MM_MAX_CHUNK_SIZE).
125 , maxChunkSize(32*1024)
127 , totalLargeItemsAllocated(0)
130 memset(nonFullChunks, 0, sizeof(nonFullChunks));
131 memset(nChunks, 0, sizeof(nChunks));
132 memset(availableItems, 0, sizeof(availableItems));
133 memset(allocCount, 0, sizeof(allocCount));
134 aggressiveGC = !qgetenv("QV4_MM_AGGRESSIVE_GC").isEmpty();
135 gcStats = !qgetenv("QV4_MM_STATS").isEmpty();
137 QByteArray overrideMaxShift = qgetenv("QV4_MM_MAXBLOCK_SHIFT");
139 uint override = overrideMaxShift.toUInt(&ok);
// Only accept a shift override in (0, 11] — presumably to bound chunk
// growth; the accepting branch itself is elided in this view.
140 if (ok && override <= 11 && override > 0)
143 QByteArray maxChunkString = qgetenv("QV4_MM_MAX_CHUNK_SIZE");
144 std::size_t tmpMaxChunkSize = maxChunkString.toUInt(&ok);
146 maxChunkSize = tmpMaxChunkSize;
// Destructor fragment: report deallocation of every heap chunk to the
// profiler before release (the release itself is elided in this view).
151 for (QVector<PageAllocation>::iterator i = heapChunks.begin(), ei = heapChunks.end(); i != ei; ++i) {
152 Q_V4_PROFILE_DEALLOC(engine, 0, i->size(), Profiling::HeapPage);
// Sweep one small-object chunk: walk every 16-byte-aligned item slot, destroy
// and zero unmarked objects, and relink surviving free slots into the chunk's
// free list. Returns whether the chunk ended up empty (per its use at the call
// site in sweep(); the return statement is elided in this view).
// NOTE(review): several lines (mark-bit test, itemsInUse update, loop close)
// are elided here — fragments below are incomplete.
160 bool sweepChunk(MemoryManager::Data::ChunkHeader *header, uint *itemsInUse, ExecutionEngine *engine)
// 'tail' tracks the last node of the rebuilt free list, seeded with the
// chunk header's sentinel free item.
163 Heap::Base *tail = &header->freeItems;
164 // qDebug("chunkStart @ %p, size=%x, pos=%x", header->itemStart, header->itemSize, header->itemSize>>4);
165 #ifdef V4_USE_VALGRIND
// Suppress valgrind reports while touching memory it considers dead.
166 VALGRIND_DISABLE_ERROR_REPORTING;
168 for (char *item = header->itemStart; item <= header->itemEnd; item += header->itemSize) {
169 Heap::Base *m = reinterpret_cast<Heap::Base *>(item);
170 // qDebug("chunk @ %p, size = %lu, in use: %s, mark bit: %s",
171 // item, m->size, (m->inUse ? "yes" : "no"), (m->markBit ? "true" : "false"));
// All items are 16-byte aligned by construction of itemStart/itemSize.
173 Q_ASSERT((qintptr) item % 16 == 0);
176 Q_ASSERT(m->inUse());
// Unmarked live object: run its destroy hook (if any), then zero the
// slot and hand it back to the free list.
182 // qDebug() << "-- collecting it." << m << tail << m->nextFree();
183 #ifdef V4_USE_VALGRIND
184 VALGRIND_ENABLE_ERROR_REPORTING;
186 if (m->vtable()->destroy)
187 m->vtable()->destroy(m);
189 memset(m, 0, header->itemSize);
190 #ifdef V4_USE_VALGRIND
191 VALGRIND_DISABLE_ERROR_REPORTING;
192 VALGRIND_MEMPOOL_FREE(engine->memoryManager, m);
194 Q_V4_PROFILE_DEALLOC(engine, m, header->itemSize, Profiling::SmallItem);
197 // Relink all free blocks to rewrite references to any released chunk.
198 tail->setNextFree(m);
// Terminate the rebuilt free list.
202 tail->setNextFree(0);
203 #ifdef V4_USE_VALGRIND
204 VALGRIND_ENABLE_ERROR_REPORTING;
// Construct the memory manager for one ExecutionEngine: allocate the
// persistent- and weak-value storages and (under valgrind) register this
// manager as a mempool. NOTE(review): initializer-list and body lines are
// elided in this view.
211 MemoryManager::MemoryManager(ExecutionEngine *engine)
213 , m_persistentValues(new PersistentValueStorage(engine))
214 , m_weakValues(new PersistentValueStorage(engine))
216 #ifdef V4_USE_VALGRIND
217 VALGRIND_CREATE_MEMPOOL(this, 0, true);
219 m_d->engine = engine;
// Allocate 'size' bytes of zeroed GC heap memory and return it as a
// Heap::Base*. Sizes >= MaxItemSize go through malloc as "large items";
// smaller sizes come from per-bucket chunk free lists, growing the heap with
// a new page-allocated chunk when no free item exists.
// NOTE(review): this view is sampled — GC-trigger calls, loop closes and the
// final return are elided; fragments below are incomplete.
222 Heap::Base *MemoryManager::allocData(std::size_t size)
// Aggressive-GC mode (QV4_MM_AGGRESSIVE_GC): presumably forces a GC on
// every allocation — the call itself is elided here.
224 if (m_d->aggressiveGC)
226 #ifdef DETAILED_MM_STATS
228 #endif // DETAILED_MM_STATS
// All GC allocations are at least one 16-byte granule.
230 Q_ASSERT(size >= 16);
231 Q_ASSERT(size % 16 == 0);
// Bucket index: one bucket per 16 bytes.
233 size_t pos = size >> 4;
235 // doesn't fit into a small bucket
236 if (size >= MemoryManager::Data::MaxItemSize) {
// Cap total large-item allocation at 8 MiB between GCs (the action
// taken when exceeded is elided in this view).
237 if (m_d->totalLargeItemsAllocated > 8 * 1024 * 1024)
240 // we use malloc for this
241 MemoryManager::Data::LargeItem *item = static_cast<MemoryManager::Data::LargeItem *>(
242 malloc(Q_V4_PROFILE_ALLOC(m_d->engine, size + sizeof(MemoryManager::Data::LargeItem),
243 Profiling::LargeItem)));
// NOTE(review): malloc result is used without a null check here —
// an OOM would crash; confirm against the full file whether this is
// intentional (Qt typically treats OOM as fatal).
244 memset(item, 0, size + sizeof(MemoryManager::Data::LargeItem));
// Push onto the intrusive large-item list.
245 item->next = m_d->largeItems;
247 m_d->largeItems = item;
248 m_d->totalLargeItemsAllocated += size;
249 return item->heapObject();
// Fast path: pop the first free item from a non-full chunk of this size.
253 Data::ChunkHeader *header = m_d->nonFullChunks[pos];
255 m = header->freeItems.nextFree();
259 // try to free up space, otherwise allocate
// Heuristic: run a GC when more than half of this bucket's items (and
// half of all items) have been handed out since the last collection.
260 if (m_d->allocCount[pos] > (m_d->availableItems[pos] >> 1) && m_d->totalAlloc > (m_d->totalItems >> 1) && !m_d->aggressiveGC) {
262 header = m_d->nonFullChunks[pos];
264 m = header->freeItems.nextFree();
269 // no free item available, allocate a new chunk
271 // allocate larger chunks at a time to avoid excessive GC, but cap at maximum chunk size (2MB by default)
272 uint shift = ++m_d->nChunks[pos];
273 if (shift > m_d->maxShift)
274 shift = m_d->maxShift;
275 std::size_t allocSize = m_d->maxChunkSize*(size_t(1) << shift);
276 allocSize = roundUpToMultipleOf(WTF::pageSize(), allocSize);
277 PageAllocation allocation = PageAllocation::allocate(
278 Q_V4_PROFILE_ALLOC(m_d->engine, allocSize, Profiling::HeapPage),
279 OSAllocator::JSGCHeapPages);
280 m_d->heapChunks.append(allocation);
// Keep heapChunks sorted (by base address, per PageAllocation ordering).
281 std::sort(m_d->heapChunks.begin(), m_d->heapChunks.end());
// Initialize the chunk header: item region starts 16-byte aligned after
// the header and ends at the last slot that fully fits.
283 header = reinterpret_cast<Data::ChunkHeader *>(allocation.base());
284 header->itemSize = int(size);
285 header->itemStart = reinterpret_cast<char *>(allocation.base()) + roundUpToMultipleOf(16, sizeof(Data::ChunkHeader));
286 header->itemEnd = reinterpret_cast<char *>(allocation.base()) + allocation.size() - header->itemSize;
288 header->nextNonFull = m_d->nonFullChunks[pos];
289 m_d->nonFullChunks[pos] = header;
// Thread every slot of the new chunk onto the free list.
291 Heap::Base *last = &header->freeItems;
292 for (char *item = header->itemStart; item <= header->itemEnd; item += header->itemSize) {
293 Heap::Base *o = reinterpret_cast<Heap::Base *>(item);
294 last->setNextFree(o);
298 last->setNextFree(0);
299 m = header->freeItems.nextFree();
300 const size_t increase = (header->itemEnd - header->itemStart) / header->itemSize;
301 m_d->availableItems[pos] += uint(increase);
302 m_d->totalItems += int(increase);
303 #ifdef V4_USE_VALGRIND
304 VALGRIND_MAKE_MEM_NOACCESS(allocation.base(), allocSize);
305 VALGRIND_MEMPOOL_ALLOC(this, header, sizeof(Data::ChunkHeader));
// Found an item 'm': account for it, unlink it from the chunk's free
// list, and drop the chunk from the non-full list if it is now full.
310 #ifdef V4_USE_VALGRIND
311 VALGRIND_MEMPOOL_ALLOC(this, m, size);
313 Q_V4_PROFILE_ALLOC(m_d->engine, size, Profiling::SmallItem);
315 ++m_d->allocCount[pos];
317 header->freeItems.setNextFree(m->nextFree());
318 if (!header->freeItems.nextFree())
319 m_d->nonFullChunks[pos] = header->nextNonFull;
// Pop objects pushed for GC marking off the JS stack (down to markBase) and
// run each object's markObjects hook, which may push further objects —
// the loop drains until the stack returns to markBase.
323 static void drainMarkStack(QV4::ExecutionEngine *engine, Value *markBase)
325 while (engine->jsStackTop > markBase) {
326 Heap::Base *h = engine->popForGC();
// Every GC-managed vtable must provide markObjects.
327 Q_ASSERT (h->vtable()->markObjects);
328 h->vtable()->markObjects(h, engine);
// Mark phase of the GC: mark engine roots, persistent values, the JS stack,
// and — as a special case — QObject wrappers whose C++ parent chain demands
// they be kept alive. NOTE(review): some guard lines (e.g. 'continue's after
// the weak-value checks, null-qobject check) are elided in this view.
332 void MemoryManager::mark()
334 Value *markBase = m_d->engine->jsStackTop;
336 m_d->engine->markObjects();
338 m_persistentValues->mark(m_d->engine);
340 collectFromJSStack();
342 // Preserve QObject ownership rules within JavaScript: A parent with c++ ownership
343 // keeps all of its children alive in JavaScript.
345 // Do this _after_ collectFromStack to ensure that processing the weak
346 // managed objects in the loop down there doesn't make then end up as leftovers
347 // on the stack and thus always get collected.
348 for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
// Only QObjectWrapper entries get the keep-alive treatment.
349 if (!(*it).isManaged())
351 if ((*it).managed()->d()->vtable() != QObjectWrapper::staticVTable())
353 QObjectWrapper *qobjectWrapper = static_cast<QObjectWrapper*>((*it).managed());
356 QObject *qobject = qobjectWrapper->object();
359 bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);
// If the object itself isn't protected, its topmost ancestor may be:
// walk to the root parent and check keep-alive there.
362 if (QObject *parent = qobject->parent()) {
363 while (parent->parent())
364 parent = parent->parent();
366 keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
371 qobjectWrapper->mark(m_d->engine);
// Drain eagerly if marking is about to overflow the JS stack.
373 if (m_d->engine->jsStackTop >= m_d->engine->jsStackLimit)
374 drainMarkStack(m_d->engine, markBase);
377 drainMarkStack(m_d->engine, markBase);
// Sweep phase of the GC: clear dead weak values, prune the multiply-wrapped
// QObject map, sweep every small-object chunk (releasing chunks that are
// both empty and surplus), free dead large items, run GCDeletable callbacks,
// and clear mark bits on stack-allocated execution contexts.
// NOTE(review): this view is sampled — several condition bodies, loop closes
// and 'erase'/advance lines are elided; fragments below are incomplete.
380 void MemoryManager::sweep(bool lastSweep)
// Weak values pointing at objects that did not survive marking are reset
// to undefined (the mark-bit test guarding this is elided here).
383 for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
384 if (Managed *m = (*it).as<Managed>()) {
386 (*it) = Primitive::undefinedValue();
391 if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = m_d->engine->m_multiplyWrappedQObjects) {
392 for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
// NOTE(review): as shown, entries are erased when the value is
// NOT null/undefined, which looks inverted — but the branch
// body/else is elided in this view; confirm against the full file.
393 if (!it.value().isNullOrUndefined())
394 it = multiplyWrappedQObjects->erase(it);
// Per-chunk sweep: record which chunks became empty and how many items
// of each bucket size remain in use.
400 bool *chunkIsEmpty = (bool *)alloca(m_d->heapChunks.size() * sizeof(bool));
401 uint itemsInUse[MemoryManager::Data::MaxItemSize/16];
402 memset(itemsInUse, 0, sizeof(itemsInUse));
// The non-full lists are rebuilt from scratch below.
403 memset(m_d->nonFullChunks, 0, sizeof(m_d->nonFullChunks));
405 for (int i = 0; i < m_d->heapChunks.size(); ++i) {
406 Data::ChunkHeader *header = reinterpret_cast<Data::ChunkHeader *>(m_d->heapChunks[i].base());
407 chunkIsEmpty[i] = sweepChunk(header, &itemsInUse[header->itemSize >> 4], m_d->engine);
// Second pass: release empty surplus chunks, re-link remaining
// non-full chunks into the per-bucket lists.
410 QVector<PageAllocation>::iterator chunkIter = m_d->heapChunks.begin();
411 for (int i = 0; i < m_d->heapChunks.size(); ++i) {
412 Q_ASSERT(chunkIter != m_d->heapChunks.end());
413 Data::ChunkHeader *header = reinterpret_cast<Data::ChunkHeader *>(chunkIter->base());
414 const size_t pos = header->itemSize >> 4;
415 const size_t decrease = (header->itemEnd - header->itemStart) / header->itemSize;
417 // Release that chunk if it could have been spared since the last GC run without any difference.
418 if (chunkIsEmpty[i] && m_d->availableItems[pos] - decrease >= itemsInUse[pos]) {
419 Q_V4_PROFILE_DEALLOC(m_d->engine, 0, chunkIter->size(), Profiling::HeapPage);
420 #ifdef V4_USE_VALGRIND
421 VALGRIND_MEMPOOL_FREE(this, header);
424 m_d->availableItems[pos] -= uint(decrease);
425 m_d->totalItems -= int(decrease);
426 chunkIter->deallocate();
427 chunkIter = m_d->heapChunks.erase(chunkIter);
429 } else if (header->freeItems.nextFree()) {
430 header->nextNonFull = m_d->nonFullChunks[pos];
431 m_d->nonFullChunks[pos] = header;
// Large items: walk the intrusive list, destroy and free unmarked ones
// (the mark test and list-unlink lines are elided in this view).
436 Data::LargeItem *i = m_d->largeItems;
437 Data::LargeItem **last = &m_d->largeItems;
439 Heap::Base *m = i->heapObject();
440 Q_ASSERT(m->inUse());
447 if (m->vtable()->destroy)
448 m->vtable()->destroy(m);
451 free(Q_V4_PROFILE_DEALLOC(m_d->engine, i, i->size + sizeof(Data::LargeItem),
452 Profiling::LargeItem));
// Run queued GCDeletable callbacks; lastCall tells them this is the
// final sweep (engine shutdown).
456 GCDeletable *deletable = m_d->deletable;
459 GCDeletable *next = deletable->next;
460 deletable->lastCall = lastSweep;
465 // some execution contexts are allocated on the stack, make sure we clear their markBit as well
467 Heap::ExecutionContext *ctx = engine()->current;
// Whether garbage collection is currently suppressed (see setGCBlocked()).
475 bool MemoryManager::isGCBlocked() const
477 return m_d->gcBlocked;
// Enable/disable GC suppression; runGC() checks this flag and bails out
// when set.
480 void MemoryManager::setGCBlocked(bool blockGC)
482 m_d->gcBlocked = blockGC;
// Run a full mark-and-sweep collection, with optional timing/size statistics
// (QV4_MM_STATS). Resets per-bucket alloc counters and the large-item total
// afterwards. NOTE(review): the plain (non-stats) mark/sweep calls and timer
// setup are elided in this view.
485 void MemoryManager::runGC()
487 if (m_d->gcBlocked) {
488 // qDebug() << "Not running GC.";
// Stats path: measure memory and elapsed time around mark() and sweep().
496 const size_t totalMem = getAllocatedMem();
501 int markTime = t.elapsed();
503 const size_t usedBefore = getUsedMem();
504 int chunksBefore = m_d->heapChunks.size();
506 const size_t usedAfter = getUsedMem();
507 int sweepTime = t.elapsed();
509 qDebug() << "========== GC ==========";
510 qDebug() << "Marked object in" << markTime << "ms.";
511 qDebug() << "Sweeped object in" << sweepTime << "ms.";
512 qDebug() << "Allocated" << totalMem << "bytes in" << m_d->heapChunks.size() << "chunks.";
513 qDebug() << "Used memory before GC:" << usedBefore;
514 qDebug() << "Used memory after GC:" << usedAfter;
515 qDebug() << "Freed up bytes:" << (usedBefore - usedAfter);
516 qDebug() << "Released chunks:" << (chunksBefore - m_d->heapChunks.size());
517 qDebug() << "======== End GC ========";
// Reset allocation accounting so the GC-trigger heuristic in allocData()
// starts fresh for the next cycle.
520 memset(m_d->allocCount, 0, sizeof(m_d->allocCount));
522 m_d->totalLargeItemsAllocated = 0;
// Total bytes of small-object slots currently in use, summed across all heap
// chunks. NOTE(review): the in-use test guarding the accumulation is elided
// in this view.
525 size_t MemoryManager::getUsedMem() const
528 for (QVector<PageAllocation>::const_iterator i = m_d->heapChunks.cbegin(), ei = m_d->heapChunks.cend(); i != ei; ++i) {
529 Data::ChunkHeader *header = reinterpret_cast<Data::ChunkHeader *>(i->base());
530 for (char *item = header->itemStart; item <= header->itemEnd; item += header->itemSize) {
531 Heap::Base *m = reinterpret_cast<Heap::Base *>(item);
532 Q_ASSERT((qintptr) item % 16 == 0);
534 usedMem += header->itemSize;
// Total bytes reserved by all page-allocated heap chunks (regardless of how
// much of each chunk is in use).
540 size_t MemoryManager::getAllocatedMem() const
543 for (int i = 0; i < m_d->heapChunks.size(); ++i)
544 total += m_d->heapChunks.at(i).size();
// Total bytes held by malloc-backed large items (accumulation line elided in
// this view; the loop walks the intrusive largeItems list).
548 size_t MemoryManager::getLargeItemsMem() const
551 for (const Data::LargeItem *i = m_d->largeItems; i != 0; i = i->next)
// Tear down the memory manager: release persistent values, run a final sweep
// (lastSweep=true so GCDeletable hooks know the engine is going away), and
// destroy the valgrind mempool.
556 MemoryManager::~MemoryManager()
558 delete m_persistentValues;
562 sweep(/*lastSweep*/true);
563 #ifdef V4_USE_VALGRIND
564 VALGRIND_DESTROY_MEMPOOL(this);
// Accessor for the owning ExecutionEngine (body elided in this view;
// presumably returns m_d->engine).
568 ExecutionEngine *MemoryManager::engine() const
// Debug helper: print per-size allocation-request counts to stderr.
// Compiles to a no-op body unless DETAILED_MM_STATS is defined.
573 void MemoryManager::dumpStats() const
575 #ifdef DETAILED_MM_STATS
576 std::cerr << "=================" << std::endl;
577 std::cerr << "Allocation stats:" << std::endl;
578 std::cerr << "Requests for each chunk size:" << std::endl;
579 for (int i = 0; i < m_d->allocSizeCounters.size(); ++i) {
580 if (unsigned count = m_d->allocSizeCounters[i]) {
// Counter index i corresponds to a 16*i byte request size.
581 std::cerr << "\t" << (i << 4) << " bytes chunks: " << count << std::endl;
584 #endif // DETAILED_MM_STATS
// Queue an object whose deletion must be coordinated with GC sweeps: push it
// onto the intrusive deletable list (the head assignment is elided in this
// view).
587 void MemoryManager::registerDeletable(GCDeletable *d)
589 d->next = m_d->deletable;
593 #ifdef DETAILED_MM_STATS
// Record an allocation request of 'size' bytes in the per-granule counters
// (only built when DETAILED_MM_STATS is defined; read back by dumpStats()).
594 void MemoryManager::willAllocate(std::size_t size)
// Round up to the 16-byte granule index this request falls into.
596 unsigned alignedSize = (size + 15) >> 4;
597 QVector<unsigned> &counters = m_d->allocSizeCounters;
// Grow the counter vector on demand so the index is always valid.
598 if ((unsigned) counters.size() < alignedSize + 1)
599 counters.resize(alignedSize + 1);
600 counters[alignedSize]++;
603 #endif // DETAILED_MM_STATS
// Treat every Managed value currently on the JS value stack as a GC root and
// mark it. NOTE(review): the loop over [v, top) and the null/inUse guards are
// elided in this view.
605 void MemoryManager::collectFromJSStack() const
607 Value *v = m_d->engine->jsStackBase;
608 Value *top = m_d->engine->jsStackTop;
610 Managed *m = v->as<Managed>();
612 // Skip pointers to already freed objects, they are bogus as well
613 m->mark(m_d->engine);