mac:HEADERS += arch/qatomic_macosx.h \
arch/qatomic_generic.h
-symbian:HEADERS += arch/qatomic_symbian.h \
- arch/qatomic_generic.h
-
vxworks:HEADERS += arch/qatomic_vxworks.h
integrity:HEADERS += arch/qatomic_integrity.h
DEPENDPATH += $$QT_ARCH_CPP
!isEmpty(QT_ARCH) {
include($$QT_ARCH_CPP/arch.pri, "", true)
-}
\ No newline at end of file
+}
# include "QtCore/qatomic_windowsce.h"
#elif defined(QT_ARCH_X86_64)
# include "QtCore/qatomic_x86_64.h"
-#elif defined(QT_ARCH_SYMBIAN)
-# include "QtCore/qatomic_symbian.h"
#elif defined(QT_ARCH_SH)
# include "QtCore/qatomic_sh.h"
#elif defined(QT_ARCH_SH4A)
+++ /dev/null
-#
-# Symbian architecture
-#
-SOURCES += $$QT_ARCH_CPP/qatomic_symbian.cpp \
- $$QT_ARCH_CPP/qatomic_generic_armv6.cpp \
- $$QT_ARCH_CPP/heap_hybrid.cpp \
- $$QT_ARCH_CPP/debugfunction.cpp \
- $$QT_ARCH_CPP/qt_heapsetup_symbian.cpp
-
-HEADERS += $$QT_ARCH_CPP/dla_p.h \
- $$QT_ARCH_CPP/heap_hybrid_p.h \
- $$QT_ARCH_CPP/common_p.h \
- $$QT_ARCH_CPP/page_alloc_p.h \
- $$QT_ARCH_CPP/slab_p.h \
- $$QT_ARCH_CPP/qt_hybridHeap_symbian_p.h
-
-exists($${EPOCROOT}epoc32/include/platform/u32std.h):DEFINES += QT_SYMBIAN_HAVE_U32STD_H
-exists($${EPOCROOT}epoc32/include/platform/e32btrace.h):DEFINES += QT_SYMBIAN_HAVE_E32BTRACE_H
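-# Note on the exists() guards above (illustrative): each define is added only
-# when the named header is actually present in the platform SDK, e.g.
-# QT_SYMBIAN_HAVE_U32STD_H is set only if u32std.h exists under $${EPOCROOT}epoc32.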
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#ifndef __E32_COMMON_H__
-#define __E32_COMMON_H__
-
-#ifdef __KERNEL_MODE__
-#include <e32cmn.h>
-#include <e32panic.h>
-#include "u32std.h"
-#else
-#include <e32std.h>
-#include <e32base.h>
-#include <e32math.h>
-#include <e32svr.h>
-#include <e32ver.h>
-#include <e32hal.h>
-#include <e32panic.h>
-// the backport of the Symbian^4 allocator to the Symbian^3 SDK does not contain u32exec.h
-//#include <u32exec.h>
-#endif
-
-GLREF_C void Panic(TCdtPanic aPanic);
-GLDEF_C void PanicBadArrayIndex();
-GLREF_C TInt __DoConvertNum(TUint, TRadix, TUint, TUint8*&);
-GLREF_C TInt __DoConvertNum(Uint64, TRadix, TUint, TUint8*&);
-
-#ifdef __KERNEL_MODE__
-GLREF_C void KernHeapFault(TCdtPanic aPanic);
-GLREF_C void KHeapCheckThreadState();
-TInt StringLength(const TUint16* aPtr);
-TInt StringLength(const TUint8* aPtr);
-
-#define STD_CLASS Kern
-#define STRING_LENGTH(s) StringLength(s)
-#define STRING_LENGTH_16(s) StringLength(s)
-#define PANIC_CURRENT_THREAD(c,r) Kern::PanicCurrentThread(c, r)
-#define __KERNEL_CHECK_RADIX(r) __ASSERT_ALWAYS(((r)==EDecimal)||((r)==EHex),Panic(EInvalidRadix))
-#define APPEND_BUF_SIZE 10
-#define APPEND_BUF_SIZE_64 20
-#define HEAP_PANIC(r) Kern::Printf("HEAP CORRUPTED %s %d", __FILE__, __LINE__), RHeapK::Fault(r)
-#define GET_PAGE_SIZE(x) x = M::PageSizeInBytes()
-#define DIVISION_BY_ZERO() FAULT()
-
-#ifdef _DEBUG
-#define __CHECK_THREAD_STATE RHeapK::CheckThreadState()
-#else
-#define __CHECK_THREAD_STATE
-#endif
-
-#else
-
-#define STD_CLASS User
-#define STRING_LENGTH(s) User::StringLength(s)
-#define STRING_LENGTH_16(s) User::StringLength(s)
-#define PANIC_CURRENT_THREAD(c,r) User::Panic(c, r)
-#define MEM_COMPARE_16 Mem::Compare
-#define __KERNEL_CHECK_RADIX(r)
-#define APPEND_BUF_SIZE 32
-#define APPEND_BUF_SIZE_64 64
-#define HEAP_PANIC(r) RDebug::Printf("HEAP CORRUPTED %s %d", __FILE__, __LINE__), Panic(r)
-#define GET_PAGE_SIZE(x) UserHal::PageSizeInBytes(x)
-#define DIVISION_BY_ZERO() User::RaiseException(EExcIntegerDivideByZero)
-#define __CHECK_THREAD_STATE
-
-#endif // __KERNEL_MODE__
-
-#endif
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#include "qt_hybridheap_symbian_p.h"
-
-#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
-
-#define GM (&iGlobalMallocState)
-#define __HEAP_CORRUPTED_TRACE(t,p,l) BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)t, (TUint32)p, (TUint32)l);
-#define __HEAP_CORRUPTED_TEST(c,x, p,l) if (!(c)) { if (iFlags & (EMonitorMemory+ETraceAllocs) ) __HEAP_CORRUPTED_TRACE(this,p,l) HEAP_PANIC(x); }
-#define __HEAP_CORRUPTED_TEST_STATIC(c,t,x,p,l) if (!(c)) { if (t && (t->iFlags & (EMonitorMemory+ETraceAllocs) )) __HEAP_CORRUPTED_TRACE(t,p,l) HEAP_PANIC(x); }
-
-TInt RHybridHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
-{
- TInt r = KErrNone;
- switch(aFunc)
- {
-
- case RAllocator::ECount:
- struct HeapInfo info;
- Lock();
- GetInfo(&info, NULL);
- *(unsigned*)a1 = info.iFreeN;
- r = info.iAllocN;
- Unlock();
- break;
-
- case RAllocator::EMarkStart:
- __DEBUG_ONLY(DoMarkStart());
- break;
-
- case RAllocator::EMarkEnd:
- __DEBUG_ONLY( r = DoMarkEnd((TInt)a1) );
- break;
-
- case RAllocator::ECheck:
- r = DoCheckHeap((SCheckInfo*)a1);
- break;
-
- case RAllocator::ESetFail:
- __DEBUG_ONLY(DoSetAllocFail((TAllocFail)(TInt)a1, (TInt)a2));
- break;
-
- case RHybridHeap::EGetFail:
- __DEBUG_ONLY(r = iFailType);
- break;
-
- case RHybridHeap::ESetBurstFail:
-#ifdef _DEBUG
- {
- SRAllocatorBurstFail* fail = (SRAllocatorBurstFail*) a2;
- DoSetAllocFail((TAllocFail)(TInt)a1, fail->iRate, fail->iBurst);
- }
-#endif
- break;
-
- case RHybridHeap::ECheckFailure:
- // iRand will be incremented for each EFailNext, EBurstFailNext,
- // EDeterministic and EBurstDeterministic failure.
- r = iRand;
- break;
-
- case RAllocator::ECopyDebugInfo:
- {
- TInt nestingLevel = ((SDebugCell*)a1)[-1].nestingLevel;
- ((SDebugCell*)a2)[-1].nestingLevel = nestingLevel;
- break;
- }
-
- case RHybridHeap::EGetSize:
- {
- r = iChunkSize - sizeof(RHybridHeap);
- break;
- }
-
- case RHybridHeap::EGetMaxLength:
- {
- r = iMaxLength;
- break;
- }
-
- case RHybridHeap::EGetBase:
- {
- *(TAny**)a1 = iBase;
- break;
- }
-
- case RHybridHeap::EAlignInteger:
- {
- r = _ALIGN_UP((TInt)a1, iAlign);
- break;
- }
-
- case RHybridHeap::EAlignAddr:
- {
- *(TAny**)a2 = (TAny*)_ALIGN_UP((TLinAddr)a1, iAlign);
- break;
- }
-
- case RHybridHeap::EWalk:
- struct HeapInfo hinfo;
- SWalkInfo winfo;
- Lock();
- winfo.iFunction = (TWalkFunc)a1;
- winfo.iParam = a2;
- winfo.iHeap = (RHybridHeap*)this;
- GetInfo(&hinfo, &winfo);
- Unlock();
- break;
-
-#ifndef __KERNEL_MODE__
-
- case RHybridHeap::EHybridHeap:
- {
- if ( !a1 )
- return KErrGeneral;
- STestCommand* cmd = (STestCommand*)a1;
- switch ( cmd->iCommand )
- {
- case EGetConfig:
- cmd->iConfig.iSlabBits = iSlabConfigBits;
- cmd->iConfig.iDelayedSlabThreshold = iPageThreshold;
- cmd->iConfig.iPagePower = iPageThreshold;
- break;
-
- case ESetConfig:
- //
- // New configuration data for slab and page allocator.
- // Reset heap to get data into use
- //
-#if USE_HYBRID_HEAP
- iSlabConfigBits = cmd->iConfig.iSlabBits & 0x3fff;
- iSlabInitThreshold = cmd->iConfig.iDelayedSlabThreshold;
- iPageThreshold = (cmd->iConfig.iPagePower & 0x1f);
- Reset();
-#endif
- break;
-
- case EHeapMetaData:
- cmd->iData = this;
- break;
-
- case ETestData:
- iTestData = cmd->iData;
- break;
-
- default:
- return KErrNotSupported;
-
- }
-
- break;
- }
-#endif // __KERNEL_MODE__
-
- default:
- return KErrNotSupported;
-
- }
- return r;
-}
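-// Usage sketch (illustrative) for the dispatcher above: calling
-// DebugFunction(RAllocator::ECount, &freeCount) stores the free-cell count
-// through a1 and returns the allocated-cell count, per the ECount case.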
-
-void RHybridHeap::Walk(SWalkInfo* aInfo, TAny* aBfr, TInt aLth, TCellType aBfrType, TAllocatorType aAllocatorType)
-{
- //
- // This function is always called from RHybridHeap::GetInfo.
- // The actual walk callback is invoked only if the SWalkInfo pointer is defined
- //
- if ( aInfo )
- {
-#ifdef __KERNEL_MODE__
- (void)aAllocatorType;
-#if defined(_DEBUG)
- if ( aBfrType == EGoodAllocatedCell )
- aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
- else
- aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
-#else
- aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
-#endif
-
-#else // __KERNEL_MODE__
-
- if ( aAllocatorType & (EFullSlab + EPartialFullSlab + EEmptySlab + ESlabSpare) )
- {
- if ( aInfo->iHeap )
- {
- TUint32 dummy;
- TInt npages;
- aInfo->iHeap->DoCheckSlab((slab*)aBfr, aAllocatorType);
- __HEAP_CORRUPTED_TEST_STATIC(aInfo->iHeap->CheckBitmap(Floor(aBfr, PAGESIZE), PAGESIZE, dummy, npages),
- aInfo->iHeap, ETHeapBadCellAddress, aBfr, aLth);
- }
- if ( aAllocatorType & EPartialFullSlab )
- WalkPartialFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
- else if ( aAllocatorType & EFullSlab )
- WalkFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
- }
-#if defined(_DEBUG)
- else if ( aBfrType == EGoodAllocatedCell )
- aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
- else
- aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
-#else
- else
- aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
-#endif
-
-#endif // __KERNEL_MODE__
- }
-}
-
-#ifndef __KERNEL_MODE__
-void RHybridHeap::WalkPartialFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType /*aBfrType*/, TInt /*aLth*/)
-{
- if ( aInfo )
- {
- //
- // Build bitmap of free buffers in the partial full slab
- //
- TUint32 bitmap[4];
- __HEAP_CORRUPTED_TEST_STATIC( (aInfo->iHeap != NULL), aInfo->iHeap, ETHeapBadCellAddress, 0, aSlab);
- aInfo->iHeap->BuildPartialSlabBitmap(bitmap, aSlab);
- //
- // Find used (allocated) buffers in the partial full slab
- //
- TUint32 h = aSlab->iHeader;
- TUint32 size = SlabHeaderSize(h);
- TUint32 count = KMaxSlabPayload / size; // Total buffer count in slab
- TUint32 i = 0;
- TUint32 ix = 0;
- TUint32 bit = 1;
-
- while ( i < count )
- {
-
- if ( bitmap[ix] & bit )
- {
- aInfo->iFunction(aInfo->iParam, EGoodFreeCell, &aSlab->iPayload[i*size], size );
- }
- else
- {
-#if defined(_DEBUG)
- aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
-#else
- aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, &aSlab->iPayload[i*size], size );
-#endif
- }
- bit <<= 1;
- if ( bit == 0 )
- {
- bit = 1;
- ix ++;
- }
-
- i ++;
- }
- }
-
-}
-
-void RHybridHeap::WalkFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt /*aLth*/)
-{
- if ( aInfo )
- {
- TUint32 h = aSlab->iHeader;
- TUint32 size = SlabHeaderSize(h);
- TUint32 count = (SlabHeaderUsedm4(h) + 4) / size;
- TUint32 i = 0;
- while ( i < count )
- {
-#if defined(_DEBUG)
- if ( aBfrType == EGoodAllocatedCell )
- aInfo->iFunction(aInfo->iParam, aBfrType, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
- else
- aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
-#else
- aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
-#endif
- i ++;
- }
- }
-}
-
-void RHybridHeap::BuildPartialSlabBitmap(TUint32* aBitmap, slab* aSlab, TAny* aBfr)
-{
- //
- // Build a bitmap of free buffers in a partial full slab
- //
- TInt i;
- TUint32 bit = 0;
- TUint32 index;
- TUint32 h = aSlab->iHeader;
- TUint32 used = SlabHeaderUsedm4(h)+4;
- TUint32 size = SlabHeaderSize(h);
- TInt count = (KMaxSlabPayload / size);
- TInt free_count = count - (used / size); // Total free buffer count in slab
- aBitmap[0] = 0, aBitmap[1] = 0, aBitmap[2] = 0, aBitmap[3] = 0;
- TUint32 offs = (h & 0xff) << 2;
-
- //
- // Process first buffer in partial slab free buffer chain
- //
- while ( offs )
- {
- unsigned char* p = (unsigned char*)Offset(aSlab, offs);
- __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, p, aSlab);
- offs -= sizeof(slabhdr);
- __HEAP_CORRUPTED_TEST( (offs % size == 0), ETHeapBadCellAddress, p, aSlab);
- index = (offs / size); // Bit index in bitmap
- i = 0;
- while ( i < 4 )
- {
- if ( index < 32 )
- {
- bit = (1 << index);
- break;
- }
- index -= 32;
- i ++;
- }
-
- __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, p, aSlab); // Buffer already in chain
-
- aBitmap[i] |= bit;
- free_count --;
- offs = ((unsigned)*p) << 2; // Next in free chain
- }
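- // Encoding note for the loop above: each free cell stores the next cell's
- // offset within the slab, pre-divided by 4, in its first byte, hence the
- // << 2 to decode it (e.g. a stored byte of 0x10 names offset 0x40).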
-
- __HEAP_CORRUPTED_TEST( (free_count >= 0), ETHeapBadCellAddress, aBfr, aSlab); // free buffer count/size mismatch
- //
- // Process the rest of the free buffers, which are in the
- // wilderness (at the end of the slab)
- //
- index = count - 1;
- i = index / 32;
- index = index % 32;
- while ( free_count && (i >= 0))
- {
- bit = (1 << index);
- __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
- aBitmap[i] |= bit;
- if ( index )
- index --;
- else
- {
- index = 31;
- i --;
- }
- free_count --;
- }
-
- if ( aBfr ) // Ensure that the specified buffer is NOT in the partial slab free buffer chain
- {
- offs = LowBits(aBfr, SLABSIZE);
- __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, aBfr, aSlab);
- offs -= sizeof(slabhdr);
- __HEAP_CORRUPTED_TEST( ((offs % size) == 0), ETHeapBadCellAddress, aBfr, aSlab);
- index = (offs / size); // Bit index in bitmap
- i = 0;
- while ( i < 4 )
- {
- if ( index < 32 )
- {
- bit = (1 << index);
- break;
- }
- index -= 32;
- i ++;
- }
- __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
- }
-}
-
-#endif // __KERNEL_MODE__
-
-void RHybridHeap::WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen)
-{
- (void)aCell;
- SHeapCellInfo& info = *(SHeapCellInfo*)aPtr;
- switch(aType)
- {
- case EGoodAllocatedCell:
- {
- ++info.iTotalAlloc;
- info.iTotalAllocSize += aLen;
-#if defined(_DEBUG)
- RHybridHeap& h = *info.iHeap;
- SDebugCell* DbgCell = (SDebugCell*)((TUint8*)aCell-EDebugHdrSize);
- if ( DbgCell->nestingLevel == h.iNestingLevel )
- {
- if (++info.iLevelAlloc==1)
- info.iStranded = DbgCell;
-#ifdef __KERNEL_MODE__
- if (KDebugNum(KSERVER) || KDebugNum(KTESTFAST))
- {
- Kern::Printf("LEAKED KERNEL HEAP CELL @ %08x : len=%d", aCell, aLen);
- TLinAddr base = ((TLinAddr)aCell)&~0x0f;
- TLinAddr end = ((TLinAddr)aCell)+(TLinAddr)aLen;
- while(base<end)
- {
- const TUint32* p = (const TUint32*)base;
- Kern::Printf("%08x: %08x %08x %08x %08x", p, p[0], p[1], p[2], p[3]);
- base += 16;
- }
- }
-#endif
- }
-#endif
- break;
- }
- case EGoodFreeCell:
- ++info.iTotalFree;
- break;
- case EBadAllocatedCellSize:
- HEAP_PANIC(ETHeapBadAllocatedCellSize);
- case EBadAllocatedCellAddress:
- HEAP_PANIC(ETHeapBadAllocatedCellAddress);
- case EBadFreeCellAddress:
- HEAP_PANIC(ETHeapBadFreeCellAddress);
- case EBadFreeCellSize:
- HEAP_PANIC(ETHeapBadFreeCellSize);
- default:
- HEAP_PANIC(ETHeapWalkBadCellType);
- }
-}
-
-
-TInt RHybridHeap::DoCheckHeap(SCheckInfo* aInfo)
-{
- (void)aInfo;
- SHeapCellInfo info;
- memclr(&info, sizeof(info));
- info.iHeap = this;
- struct HeapInfo hinfo;
- SWalkInfo winfo;
- Lock();
- DoCheckMallocState(GM); // Check DL heap internal structure
-#ifndef __KERNEL_MODE__
- TUint32 dummy;
- TInt npages;
- __HEAP_CORRUPTED_TEST(CheckBitmap(NULL, 0, dummy, npages), ETHeapBadCellAddress, this, 0); // Check page allocator buffers
- DoCheckSlabTrees();
- DoCheckCommittedSize(npages, GM);
-#endif
- winfo.iFunction = WalkCheckCell;
- winfo.iParam = &info;
- winfo.iHeap = (RHybridHeap*)this;
- GetInfo(&hinfo, &winfo);
- Unlock();
-
-#if defined(_DEBUG)
- if (!aInfo)
- return KErrNone;
- TInt expected = aInfo->iCount;
- TInt actual = aInfo->iAll ? info.iTotalAlloc : info.iLevelAlloc;
- if (actual!=expected && !iTestData)
- {
-#ifdef __KERNEL_MODE__
- Kern::Fault("KERN-ALLOC COUNT", (expected<<16)|actual );
-#else
- User::Panic(_L("ALLOC COUNT"), (expected<<16)|actual );
-#endif
- }
-#endif
- return KErrNone;
-}
-
-#ifdef _DEBUG
-void RHybridHeap::DoMarkStart()
-{
- if (iNestingLevel==0)
- iAllocCount=0;
- iNestingLevel++;
-}
-
-TUint32 RHybridHeap::DoMarkEnd(TInt aExpected)
-{
- if (iNestingLevel==0)
- return 0;
- SHeapCellInfo info;
- SHeapCellInfo* p = iTestData ? (SHeapCellInfo*)iTestData : &info;
- memclr(p, sizeof(info));
- p->iHeap = this;
- struct HeapInfo hinfo;
- SWalkInfo winfo;
- Lock();
- winfo.iFunction = WalkCheckCell;
- winfo.iParam = p;
- winfo.iHeap = (RHybridHeap*)this;
- GetInfo(&hinfo, &winfo);
- Unlock();
-
- if (p->iLevelAlloc != aExpected && !iTestData)
- return (TUint32)(p->iStranded + 1);
- if (--iNestingLevel == 0)
- iAllocCount = 0;
- return 0;
-}
-
-void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate)
-{// Default to a burst mode of 1, as aType may be a burst type.
- DoSetAllocFail(aType, aRate, 1);
-}
-
-void ResetAllocCellLevels(TAny* aPtr, RHybridHeap::TCellType aType, TAny* aCell, TInt aLen)
-{
- (void)aPtr;
- (void)aLen;
-
- if (aType == RHybridHeap::EGoodAllocatedCell)
- {
- RHybridHeap::SDebugCell* DbgCell = (RHybridHeap::SDebugCell*)((TUint8*)aCell-RHybridHeap::EDebugHdrSize);
- DbgCell->nestingLevel = 0;
- }
-}
-
-// Don't change this: the ETHeapBadDebugFailParameter check below and the API
-// documentation rely on this being 16 for RHybridHeap.
-LOCAL_D const TInt KBurstFailRateShift = 16;
-LOCAL_D const TInt KBurstFailRateMask = (1 << KBurstFailRateShift) - 1;
-
-void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst)
-{
- if (aType==EReset)
- {
- // reset levels of all allocated cells to 0
- // this should prevent subsequent tests failing unnecessarily
- iFailed = EFalse; // Reset for ECheckFailure relies on this.
- struct HeapInfo hinfo;
- SWalkInfo winfo;
- Lock();
- winfo.iFunction = (TWalkFunc)&ResetAllocCellLevels;
- winfo.iParam = NULL;
- winfo.iHeap = (RHybridHeap*)this;
- GetInfo(&hinfo, &winfo);
- Unlock();
- // reset heap allocation mark as well
- iNestingLevel=0;
- iAllocCount=0;
- aType=ENone;
- }
-
- switch (aType)
- {
- case EBurstRandom:
- case EBurstTrueRandom:
- case EBurstDeterministic:
- case EBurstFailNext:
- // If the fail type is a burst type then iFailRate is split in 2:
- // the 16 lsbs are the fail rate and the 16 msbs are the burst length.
- if (TUint(aRate) > (TUint)KMaxTUint16 || aBurst > KMaxTUint16)
- HEAP_PANIC(ETHeapBadDebugFailParameter);
-
- iFailed = EFalse;
- iFailType = aType;
- iFailRate = (aRate == 0) ? 1 : aRate;
- iFailAllocCount = -iFailRate;
- iFailRate = iFailRate | (aBurst << KBurstFailRateShift);
- break;
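- // Worked example of the packing above (illustrative): with aRate == 5 and
- // aBurst == 3, iFailRate becomes 5 | (3 << 16) == 0x00030005, from which
- // CheckForSimulatedAllocFail() recovers rate == 5 and burst == 3.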
-
- default:
- iFailed = EFalse;
- iFailType = aType;
- iFailRate = (aRate == 0) ? 1 : aRate; // A rate of <1 is meaningless
- iFailAllocCount = 0;
- break;
- }
-
- // Set up iRand for either:
- // - random seed value, or
- // - a count of the number of failures so far.
- iRand = 0;
-#ifndef __KERNEL_MODE__
- switch (iFailType)
- {
- case ETrueRandom:
- case EBurstTrueRandom:
- {
- TTime time;
- time.HomeTime();
- TInt64 seed = time.Int64();
- iRand = Math::Rand(seed);
- break;
- }
- case ERandom:
- case EBurstRandom:
- {
- TInt64 seed = 12345;
- iRand = Math::Rand(seed);
- break;
- }
- default:
- break;
- }
-#endif
-}
-
-TBool RHybridHeap::CheckForSimulatedAllocFail()
-//
-// Check to see if the user has requested simulated alloc failure, and if so possibly
-// Return ETrue indicating a failure.
-//
-{
- // For burst mode failures iFailRate packs both the fail rate and the burst length
- TUint16 rate = (TUint16)(iFailRate & KBurstFailRateMask);
- TUint16 burst = (TUint16)(iFailRate >> KBurstFailRateShift);
- TBool r = EFalse;
- switch (iFailType)
- {
-#ifndef __KERNEL_MODE__
- case ERandom:
- case ETrueRandom:
- if (++iFailAllocCount>=iFailRate)
- {
- iFailAllocCount=0;
- if (!iFailed) // haven't failed yet after iFailRate allocations so fail now
- return(ETrue);
- iFailed=EFalse;
- }
- else
- {
- if (!iFailed)
- {
- TInt64 seed=iRand;
- iRand=Math::Rand(seed);
- if (iRand%iFailRate==0)
- {
- iFailed=ETrue;
- return(ETrue);
- }
- }
- }
- break;
-
- case EBurstRandom:
- case EBurstTrueRandom:
- if (++iFailAllocCount < 0)
- {
- // We haven't started failing yet so should we now?
- TInt64 seed = iRand;
- iRand = Math::Rand(seed);
- if (iRand % rate == 0)
- {// Fail now. Reset iFailAllocCount so we fail burst times
- iFailAllocCount = 0;
- r = ETrue;
- }
- }
- else
- {
- if (iFailAllocCount < burst)
- {// Keep failing for burst times
- r = ETrue;
- }
- else
- {// We've now failed burst times so start again.
- iFailAllocCount = -(rate - 1);
- }
- }
- break;
-#endif
- case EDeterministic:
- if (++iFailAllocCount%iFailRate==0)
- {
- r=ETrue;
- iRand++; // Keep count of how many times we have failed
- }
- break;
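- // e.g. with iFailRate == 3 this fails the 3rd, 6th, 9th... allocation,
- // since iFailAllocCount starts at 0 and is pre-incremented.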
-
- case EBurstDeterministic:
- // This will fail burst times out of every rate attempts.
- if (++iFailAllocCount >= 0)
- {
- if (iFailAllocCount == burst - 1)
- {// This is the burst'th time we have failed, so make it the last by
- // resetting the counts so that we next fail after rate attempts.
- iFailAllocCount = -rate;
- }
- r = ETrue;
- iRand++; // Keep count of how many times we have failed
- }
- break;
-
- case EFailNext:
- if ((++iFailAllocCount%iFailRate)==0)
- {
- iFailType=ENone;
- r=ETrue;
- iRand++; // Keep count of how many times we have failed
- }
- break;
-
- case EBurstFailNext:
- if (++iFailAllocCount >= 0)
- {
- if (iFailAllocCount == burst - 1)
- {// This is the burst'th time we have failed, so make it the last.
- iFailType = ENone;
- }
- r = ETrue;
- iRand++; // Keep count of how many times we have failed
- }
- break;
-
- default:
- break;
- }
- return r;
-}
-
-#endif // _DEBUG
-
-//
-// Methods for Doug Lea allocator detailed check
-//
-
-void RHybridHeap::DoCheckAnyChunk(mstate m, mchunkptr p)
-{
- __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p, 0);
- (void)m;
-}
-
-/* Check properties of iTop chunk */
-void RHybridHeap::DoCheckTopChunk(mstate m, mchunkptr p)
-{
- msegmentptr sp = &m->iSeg;
- size_t sz = CHUNKSIZE(p);
- __HEAP_CORRUPTED_TEST((sp != 0), ETHeapBadCellAddress, p, 0);
- __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p,0);
- __HEAP_CORRUPTED_TEST((sz == m->iTopSize), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((sz > 0), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((sz == ((sp->iBase + sp->iSize) - (TUint8*)p) - TOP_FOOT_SIZE), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
-}
-
-/* Check properties of inuse chunks */
-void RHybridHeap::DoCheckInuseChunk(mstate m, mchunkptr p)
-{
- DoCheckAnyChunk(m, p);
- __HEAP_CORRUPTED_TEST((CINUSE(p)), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
- /* If not PINUSE and not mmapped, previous chunk has OK offset */
- __HEAP_CORRUPTED_TEST((PINUSE(p) || NEXT_CHUNK(PREV_CHUNK(p)) == p), ETHeapBadCellAddress,p,0);
-}
-
-/* Check properties of free chunks */
-void RHybridHeap::DoCheckFreeChunk(mstate m, mchunkptr p)
-{
- size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
- mchunkptr next = CHUNK_PLUS_OFFSET(p, sz);
- DoCheckAnyChunk(m, p);
- __HEAP_CORRUPTED_TEST((!CINUSE(p)), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
- if (p != m->iDv && p != m->iTop)
- {
- if (sz >= MIN_CHUNK_SIZE)
- {
- __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((IS_ALIGNED(CHUNK2MEM(p))), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((next->iPrevFoot == sz), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST( (next == m->iTop || CINUSE(next)), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((p->iFd->iBk == p), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((p->iBk->iFd == p), ETHeapBadCellAddress,p,0);
- }
- else /* markers are always of size SIZE_T_SIZE */
- __HEAP_CORRUPTED_TEST((sz == SIZE_T_SIZE), ETHeapBadCellAddress,p,0);
- }
-}
-
-/* Check properties of malloced chunks at the point they are malloced */
-void RHybridHeap::DoCheckMallocedChunk(mstate m, void* mem, size_t s)
-{
- if (mem != 0)
- {
- mchunkptr p = MEM2CHUNK(mem);
- size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
- DoCheckInuseChunk(m, p);
- __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((sz >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((sz >= s), ETHeapBadCellAddress,p,0);
- /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
- __HEAP_CORRUPTED_TEST((sz < (s + MIN_CHUNK_SIZE)), ETHeapBadCellAddress,p,0);
- }
-}
-
-/* Check a tree and its subtrees. */
-void RHybridHeap::DoCheckTree(mstate m, tchunkptr t)
-{
- tchunkptr head = 0;
- tchunkptr u = t;
- bindex_t tindex = t->iIndex;
- size_t tsize = CHUNKSIZE(t);
- bindex_t idx;
- DoComputeTreeIndex(tsize, idx);
- __HEAP_CORRUPTED_TEST((tindex == idx), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((tsize >= MIN_LARGE_SIZE), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((tsize >= MINSIZE_FOR_TREE_INDEX(idx)), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST(((idx == NTREEBINS-1) || (tsize < MINSIZE_FOR_TREE_INDEX((idx+1)))), ETHeapBadCellAddress,u,0);
-
- do
- { /* traverse through chain of same-sized nodes */
- DoCheckAnyChunk(m, ((mchunkptr)u));
- __HEAP_CORRUPTED_TEST((u->iIndex == tindex), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((CHUNKSIZE(u) == tsize), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((!CINUSE(u)), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(u)), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((u->iFd->iBk == u), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((u->iBk->iFd == u), ETHeapBadCellAddress,u,0);
- if (u->iParent == 0)
- {
- __HEAP_CORRUPTED_TEST((u->iChild[0] == 0), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((u->iChild[1] == 0), ETHeapBadCellAddress,u,0);
- }
- else
- {
- __HEAP_CORRUPTED_TEST((head == 0), ETHeapBadCellAddress,u,0); /* only one node on chain has iParent */
- head = u;
- __HEAP_CORRUPTED_TEST((u->iParent != u), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST( (u->iParent->iChild[0] == u ||
- u->iParent->iChild[1] == u ||
- *((tbinptr*)(u->iParent)) == u), ETHeapBadCellAddress,u,0);
- if (u->iChild[0] != 0)
- {
- __HEAP_CORRUPTED_TEST((u->iChild[0]->iParent == u), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((u->iChild[0] != u), ETHeapBadCellAddress,u,0);
- DoCheckTree(m, u->iChild[0]);
- }
- if (u->iChild[1] != 0)
- {
- __HEAP_CORRUPTED_TEST((u->iChild[1]->iParent == u), ETHeapBadCellAddress,u,0);
- __HEAP_CORRUPTED_TEST((u->iChild[1] != u), ETHeapBadCellAddress,u,0);
- DoCheckTree(m, u->iChild[1]);
- }
- if (u->iChild[0] != 0 && u->iChild[1] != 0)
- {
- __HEAP_CORRUPTED_TEST((CHUNKSIZE(u->iChild[0]) < CHUNKSIZE(u->iChild[1])), ETHeapBadCellAddress,u,0);
- }
- }
- u = u->iFd;
- }
- while (u != t);
- __HEAP_CORRUPTED_TEST((head != 0), ETHeapBadCellAddress,u,0);
-}
-
-/* Check all the chunks in a treebin. */
-void RHybridHeap::DoCheckTreebin(mstate m, bindex_t i)
-{
- tbinptr* tb = TREEBIN_AT(m, i);
- tchunkptr t = *tb;
- int empty = (m->iTreeMap & (1U << i)) == 0;
- if (t == 0)
- __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,t,0);
- if (!empty)
- DoCheckTree(m, t);
-}
-
-/* Check all the chunks in a smallbin. */
-void RHybridHeap::DoCheckSmallbin(mstate m, bindex_t i)
-{
- sbinptr b = SMALLBIN_AT(m, i);
- mchunkptr p = b->iBk;
- unsigned int empty = (m->iSmallMap & (1U << i)) == 0;
- if (p == b)
- __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,p,0);
- if (!empty)
- {
- for (; p != b; p = p->iBk)
- {
- size_t size = CHUNKSIZE(p);
- mchunkptr q;
- /* each chunk claims to be free */
- DoCheckFreeChunk(m, p);
- /* chunk belongs in bin */
- __HEAP_CORRUPTED_TEST((SMALL_INDEX(size) == i), ETHeapBadCellAddress,p,0);
- __HEAP_CORRUPTED_TEST((p->iBk == b || CHUNKSIZE(p->iBk) == CHUNKSIZE(p)), ETHeapBadCellAddress,p,0);
- /* chunk is followed by an inuse chunk */
- q = NEXT_CHUNK(p);
- if (q->iHead != FENCEPOST_HEAD)
- DoCheckInuseChunk(m, q);
- }
- }
-}
-
-/* Find x in a bin. Used in other check functions. */
-TInt RHybridHeap::BinFind(mstate m, mchunkptr x)
-{
- size_t size = CHUNKSIZE(x);
- if (IS_SMALL(size))
- {
- bindex_t sidx = SMALL_INDEX(size);
- sbinptr b = SMALLBIN_AT(m, sidx);
- if (SMALLMAP_IS_MARKED(m, sidx))
- {
- mchunkptr p = b;
- do
- {
- if (p == x)
- return 1;
- }
- while ((p = p->iFd) != b);
- }
- }
- else
- {
- bindex_t tidx;
- DoComputeTreeIndex(size, tidx);
- if (TREEMAP_IS_MARKED(m, tidx))
- {
- tchunkptr t = *TREEBIN_AT(m, tidx);
- size_t sizebits = size << LEFTSHIFT_FOR_TREE_INDEX(tidx);
- while (t != 0 && CHUNKSIZE(t) != size)
- {
- t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
- sizebits <<= 1;
- }
- if (t != 0)
- {
- tchunkptr u = t;
- do
- {
- if (u == (tchunkptr)x)
- return 1;
- }
- while ((u = u->iFd) != t);
- }
- }
- }
- return 0;
-}
-
-/* Traverse each chunk and check it; return total */
-size_t RHybridHeap::TraverseAndCheck(mstate m)
-{
- size_t sum = 0;
- msegmentptr s = &m->iSeg;
- sum += m->iTopSize + TOP_FOOT_SIZE;
- mchunkptr q = ALIGN_AS_CHUNK(s->iBase);
- mchunkptr lastq = 0;
- __HEAP_CORRUPTED_TEST((PINUSE(q)), ETHeapBadCellAddress,q,0);
- while (q != m->iTop && q->iHead != FENCEPOST_HEAD)
- {
- sum += CHUNKSIZE(q);
- if (CINUSE(q))
- {
- __HEAP_CORRUPTED_TEST((!BinFind(m, q)), ETHeapBadCellAddress,q,0);
- DoCheckInuseChunk(m, q);
- }
- else
- {
- __HEAP_CORRUPTED_TEST((q == m->iDv || BinFind(m, q)), ETHeapBadCellAddress,q,0);
- __HEAP_CORRUPTED_TEST((lastq == 0 || CINUSE(lastq)), ETHeapBadCellAddress,q,0); /* Not 2 consecutive free */
- DoCheckFreeChunk(m, q);
- }
- lastq = q;
- q = NEXT_CHUNK(q);
- }
- return sum;
-}
-
-/* Check all properties of malloc_state. */
-void RHybridHeap::DoCheckMallocState(mstate m)
-{
- bindex_t i;
-// size_t total;
- /* check bins */
- for (i = 0; i < NSMALLBINS; ++i)
- DoCheckSmallbin(m, i);
- for (i = 0; i < NTREEBINS; ++i)
- DoCheckTreebin(m, i);
-
- if (m->iDvSize != 0)
- { /* check iDv chunk */
- DoCheckAnyChunk(m, m->iDv);
- __HEAP_CORRUPTED_TEST((m->iDvSize == CHUNKSIZE(m->iDv)), ETHeapBadCellAddress,m->iDv,0);
- __HEAP_CORRUPTED_TEST((m->iDvSize >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,m->iDv,0);
- __HEAP_CORRUPTED_TEST((BinFind(m, m->iDv) == 0), ETHeapBadCellAddress,m->iDv,0);
- }
-
- if (m->iTop != 0)
- { /* check iTop chunk */
- DoCheckTopChunk(m, m->iTop);
- __HEAP_CORRUPTED_TEST((m->iTopSize == CHUNKSIZE(m->iTop)), ETHeapBadCellAddress,m->iTop,0);
- __HEAP_CORRUPTED_TEST((m->iTopSize > 0), ETHeapBadCellAddress,m->iTop,0);
- __HEAP_CORRUPTED_TEST((BinFind(m, m->iTop) == 0), ETHeapBadCellAddress,m->iTop,0);
- }
-
-// total =
- TraverseAndCheck(m);
-}
-
-#ifndef __KERNEL_MODE__
-//
-// Methods for Slab allocator detailed check
-//
-void RHybridHeap::DoCheckSlabTree(slab** aS, TBool aPartialPage)
-{
- slab* s = *aS;
- if (!s)
- return;
-
- TUint size = SlabHeaderSize(s->iHeader);
- slab** parent = aS;
- slab** child2 = &s->iChild2;
-
- while ( s )
- {
- __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
- __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
- __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);
-
- if ( aPartialPage )
- {
- if ( s->iChild1 )
- size = SlabHeaderSize(s->iChild1->iHeader);
- }
- else
- {
- __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
- }
- parent = &s->iChild1;
- s = s->iChild1;
-
- }
-
- parent = child2;
- s = *child2;
-
- while ( s )
- {
- __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
- __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
- __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);
-
- if ( aPartialPage )
- {
- if ( s->iChild2 )
- size = SlabHeaderSize(s->iChild2->iHeader);
- }
- else
- {
- __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
- }
- parent = &s->iChild2;
- s = s->iChild2;
-
- }
-
-}
-
-void RHybridHeap::DoCheckSlabTrees()
-{
- for (TInt i = 0; i < (MAXSLABSIZE>>2); ++i)
- DoCheckSlabTree(&iSlabAlloc[i].iPartial, EFalse);
- DoCheckSlabTree(&iPartialPage, ETrue);
-}
-
-void RHybridHeap::DoCheckSlab(slab* aSlab, TAllocatorType aSlabType, TAny* aBfr)
-{
- if ( (aSlabType == ESlabSpare) || (aSlabType == EEmptySlab) )
- return;
-
- unsigned h = aSlab->iHeader;
- __HEAP_CORRUPTED_TEST((ZEROBITS(h)), ETHeapBadCellAddress,aBfr,aSlab);
- unsigned used = SlabHeaderUsedm4(h)+4;
- unsigned size = SlabHeaderSize(h);
- __HEAP_CORRUPTED_TEST( (used < SLABSIZE),ETHeapBadCellAddress, aBfr, aSlab);
- __HEAP_CORRUPTED_TEST( ((size > 3 ) && (size <= MAXSLABSIZE)), ETHeapBadCellAddress,aBfr,aSlab);
- unsigned count = 0;
-
- switch ( aSlabType )
- {
- case EFullSlab:
- count = (KMaxSlabPayload / size );
- __HEAP_CORRUPTED_TEST((used == count*size), ETHeapBadCellAddress,aBfr,aSlab);
- __HEAP_CORRUPTED_TEST((HeaderFloating(h)), ETHeapBadCellAddress,aBfr,aSlab);
- break;
-
- case EPartialFullSlab:
- __HEAP_CORRUPTED_TEST(((used % size)==0),ETHeapBadCellAddress,aBfr,aSlab);
- __HEAP_CORRUPTED_TEST(((SlabHeaderFree(h) == 0) || (((SlabHeaderFree(h)<<2)-sizeof(slabhdr)) % SlabHeaderSize(h) == 0)),
- ETHeapBadCellAddress,aBfr,aSlab);
- break;
-
- default:
- break;
-
- }
-}
-
-//
-// Check that committed size in heap equals number of pages in bitmap
-// plus size of Doug Lea region
-//
-void RHybridHeap::DoCheckCommittedSize(TInt aNPages, mstate aM)
-{
- TInt total_committed = (aNPages * iPageSize) + aM->iSeg.iSize + (iBase - (TUint8*)this);
- __HEAP_CORRUPTED_TEST((total_committed == iChunkSize), ETHeapBadCellAddress,total_committed,iChunkSize);
-}
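-// In other words (illustrative): committed memory is the page-allocator pages
-// plus the Doug Lea segment plus the heap metadata between 'this' and iBase;
-// any other total indicates corruption.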
-
-#endif // __KERNEL_MODE__
-
-#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#ifndef __DLA__
-#define __DLA__
-
-#define DEFAULT_TRIM_THRESHOLD ((size_t)4U * (size_t)1024U)
-
-#define MSPACES 0
-#define HAVE_MORECORE 1
-#define MORECORE_CONTIGUOUS 1
-#define HAVE_MMAP 0
-#define HAVE_MREMAP 0
-#define DEFAULT_GRANULARITY (4096U)
-#define FOOTERS 0
-#define USE_LOCKS 0
-#define INSECURE 1
-#define NO_MALLINFO 0
-
-#define LACKS_SYS_TYPES_H
-#ifndef LACKS_SYS_TYPES_H
-#include <sys/types.h> /* For size_t */
-#else
-#ifndef _SIZE_T_DECLARED
-typedef unsigned int size_t;
-#define _SIZE_T_DECLARED
-#endif
-#endif /* LACKS_SYS_TYPES_H */
-
-/* The maximum possible size_t value has all bits set */
-#define MAX_SIZE_T (~(size_t)0)
-
-#ifndef ONLY_MSPACES
- #define ONLY_MSPACES 0
-#endif /* ONLY_MSPACES */
-
-#ifndef MSPACES
- #if ONLY_MSPACES
- #define MSPACES 1
- #else /* ONLY_MSPACES */
- #define MSPACES 0
- #endif /* ONLY_MSPACES */
-#endif /* MSPACES */
-
-//#ifndef MALLOC_ALIGNMENT
-// #define MALLOC_ALIGNMENT ((size_t)8U)
-//#endif /* MALLOC_ALIGNMENT */
-
-#ifndef FOOTERS
- #define FOOTERS 0
-#endif /* FOOTERS */
-
-#ifndef ABORT
-// #define ABORT abort()
-// #define ABORT User::Invariant()// redefined so euser isn't dependent on oe
- #define ABORT HEAP_PANIC(ETHeapBadCellAddress)
-#endif /* ABORT */
-
-#ifndef PROCEED_ON_ERROR
- #define PROCEED_ON_ERROR 0
-#endif /* PROCEED_ON_ERROR */
-
-#ifndef USE_LOCKS
- #define USE_LOCKS 0
-#endif /* USE_LOCKS */
-
-#ifndef INSECURE
- #define INSECURE 0
-#endif /* INSECURE */
-
-#ifndef HAVE_MMAP
- #define HAVE_MMAP 1
-#endif /* HAVE_MMAP */
-
-#ifndef MMAP_CLEARS
- #define MMAP_CLEARS 1
-#endif /* MMAP_CLEARS */
-
-#ifndef HAVE_MREMAP
- #ifdef linux
- #define HAVE_MREMAP 1
- #else /* linux */
- #define HAVE_MREMAP 0
- #endif /* linux */
-#endif /* HAVE_MREMAP */
-
-#ifndef MALLOC_FAILURE_ACTION
- //#define MALLOC_FAILURE_ACTION errno = ENOMEM;
- #define MALLOC_FAILURE_ACTION ;
-#endif /* MALLOC_FAILURE_ACTION */
-
-#ifndef HAVE_MORECORE
- #if ONLY_MSPACES
- #define HAVE_MORECORE 1 /*AMOD: has changed */
- #else /* ONLY_MSPACES */
- #define HAVE_MORECORE 1
- #endif /* ONLY_MSPACES */
-#endif /* HAVE_MORECORE */
-
-#if !HAVE_MORECORE
- #define MORECORE_CONTIGUOUS 0
-#else /* !HAVE_MORECORE */
- #ifndef MORECORE
- #define MORECORE DLAdjust
- #endif /* MORECORE */
- #ifndef MORECORE_CONTIGUOUS
- #define MORECORE_CONTIGUOUS 0
- #endif /* MORECORE_CONTIGUOUS */
-#endif /* !HAVE_MORECORE */
-
-#ifndef DEFAULT_GRANULARITY
- #if MORECORE_CONTIGUOUS
- #define DEFAULT_GRANULARITY 4096 /* stock dlmalloc uses 0 here, meaning compute in init_mparams */
- #else /* MORECORE_CONTIGUOUS */
- #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
- #endif /* MORECORE_CONTIGUOUS */
-#endif /* DEFAULT_GRANULARITY */
-
-#ifndef DEFAULT_TRIM_THRESHOLD
- #ifndef MORECORE_CANNOT_TRIM
- #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
- #else /* MORECORE_CANNOT_TRIM */
- #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
- #endif /* MORECORE_CANNOT_TRIM */
-#endif /* DEFAULT_TRIM_THRESHOLD */
-
-#ifndef DEFAULT_MMAP_THRESHOLD
- #if HAVE_MMAP
- #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
- #else /* HAVE_MMAP */
- #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
- #endif /* HAVE_MMAP */
-#endif /* DEFAULT_MMAP_THRESHOLD */
-
-#ifndef USE_BUILTIN_FFS
- #define USE_BUILTIN_FFS 0
-#endif /* USE_BUILTIN_FFS */
-
-#ifndef USE_DEV_RANDOM
- #define USE_DEV_RANDOM 0
-#endif /* USE_DEV_RANDOM */
-
-#ifndef NO_MALLINFO
- #define NO_MALLINFO 0
-#endif /* NO_MALLINFO */
-#ifndef MALLINFO_FIELD_TYPE
- #define MALLINFO_FIELD_TYPE size_t
-#endif /* MALLINFO_FIELD_TYPE */
-
-/*
- mallopt tuning options. SVID/XPG defines four standard parameter
- numbers for mallopt, normally defined in malloc.h. None of these
- are used in this malloc, so setting them has no effect. But this
- malloc does support the following options.
-*/
-
-#define M_TRIM_THRESHOLD (-1)
-#define M_GRANULARITY (-2)
-#define M_MMAP_THRESHOLD (-3)
-
-#if !NO_MALLINFO
-/*
- This version of malloc supports the standard SVID/XPG mallinfo
- routine that returns a struct containing usage properties and
- statistics. It should work on any system that has a
- /usr/include/malloc.h defining struct mallinfo. The main
- declaration needed is the mallinfo struct that is returned (by-copy)
- by mallinfo(). The mallinfo struct contains a bunch of fields that
- are not even meaningful in this version of malloc. These fields
- are instead filled by mallinfo() with other numbers that might be of
- interest.
-
- HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
- /usr/include/malloc.h file that includes a declaration of struct
- mallinfo. If so, it is included; else a compliant version is
- declared below. These must be precisely the same for mallinfo() to
- work. The original SVID version of this struct, defined on most
- systems with mallinfo, declares all fields as ints. But some others
- define them as unsigned long. If your system defines the fields using a
- type of different width than listed here, you MUST #include your
- system version and #define HAVE_USR_INCLUDE_MALLOC_H.
-*/
-
-/* #define HAVE_USR_INCLUDE_MALLOC_H */
-
-#ifdef HAVE_USR_INCLUDE_MALLOC_H
-#include "/usr/include/malloc.h"
-#else /* HAVE_USR_INCLUDE_MALLOC_H */
-
-struct mallinfo {
- MALLINFO_FIELD_TYPE iArena; /* non-mmapped space allocated from system */
- MALLINFO_FIELD_TYPE iOrdblks; /* number of free chunks */
- MALLINFO_FIELD_TYPE iSmblks; /* always 0 */
- MALLINFO_FIELD_TYPE iHblks; /* always 0 */
- MALLINFO_FIELD_TYPE iHblkhd; /* space in mmapped regions */
- MALLINFO_FIELD_TYPE iUsmblks; /* maximum total allocated space */
- MALLINFO_FIELD_TYPE iFsmblks; /* always 0 */
- MALLINFO_FIELD_TYPE iUordblks; /* total allocated space */
- MALLINFO_FIELD_TYPE iFordblks; /* total free space */
- MALLINFO_FIELD_TYPE iKeepcost; /* releasable (via malloc_trim) space */
- MALLINFO_FIELD_TYPE iCellCount;/* Number of chunks allocated*/
-};
-
-#endif /* HAVE_USR_INCLUDE_MALLOC_H */
-#endif /* NO_MALLINFO */
-
-#if MSPACES
- typedef void* mspace;
-#endif /* MSPACES */
-
-#if 0
-
-#include <stdio.h>/* for printing in malloc_stats */
-
-#ifndef LACKS_ERRNO_H
- #include <errno.h> /* for MALLOC_FAILURE_ACTION */
-#endif /* LACKS_ERRNO_H */
-
-#if FOOTERS
- #include <time.h> /* for iMagic initialization */
-#endif /* FOOTERS */
-
-#ifndef LACKS_STDLIB_H
- #include <stdlib.h> /* for abort() */
-#endif /* LACKS_STDLIB_H */
-
-#if !defined(ASSERT)
-#define ASSERT(x) __ASSERT_DEBUG(x, HEAP_PANIC(ETHeapBadCellAddress))
-#endif
-
-#ifndef LACKS_STRING_H
- #include <string.h> /* for memset etc */
-#endif /* LACKS_STRING_H */
-
-#if USE_BUILTIN_FFS
- #ifndef LACKS_STRINGS_H
- #include <strings.h> /* for ffs */
- #endif /* LACKS_STRINGS_H */
-#endif /* USE_BUILTIN_FFS */
-
-#if HAVE_MMAP
- #ifndef LACKS_SYS_MMAN_H
- #include <sys/mman.h> /* for mmap */
- #endif /* LACKS_SYS_MMAN_H */
- #ifndef LACKS_FCNTL_H
- #include <fcntl.h>
- #endif /* LACKS_FCNTL_H */
-#endif /* HAVE_MMAP */
-
-#if HAVE_MORECORE
- #ifndef LACKS_UNISTD_H
- #include <unistd.h> /* for sbrk */
- extern void* sbrk(size_t);
- #else /* LACKS_UNISTD_H */
- #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
- extern void* sbrk(ptrdiff_t);
- /*AMOD: sbrk is not defined on WIN32; need to check on Symbian*/
- #endif /* FreeBSD etc */
- #endif /* LACKS_UNISTD_H */
-#endif /* HAVE_MORECORE */
-
-#endif
-
-/*AMOD: For MALLOC_GETPAGESIZE*/
-#if 0 // replaced with GET_PAGE_SIZE() defined in heap.cpp
-#ifndef WIN32
- #ifndef MALLOC_GETPAGESIZE
- #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
- #ifndef _SC_PAGE_SIZE
- #define _SC_PAGE_SIZE _SC_PAGESIZE
- #endif
- #endif
- #ifdef _SC_PAGE_SIZE
- #define MALLOC_GETPAGESIZE sysconf(_SC_PAGE_SIZE)
- #else
- #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
- extern size_t getpagesize();
- #define MALLOC_GETPAGESIZE getpagesize()
- #else
- #ifdef WIN32 /* use supplied emulation of getpagesize */
- #define MALLOC_GETPAGESIZE getpagesize()
- #else
- #ifndef LACKS_SYS_PARAM_H
- #include <sys/param.h>
- #endif
- #ifdef EXEC_PAGESIZE
- #define MALLOC_GETPAGESIZE EXEC_PAGESIZE
- #else
- #ifdef NBPG
- #ifndef CLSIZE
- #define MALLOC_GETPAGESIZE NBPG
- #else
- #define MALLOC_GETPAGESIZE (NBPG * CLSIZE)
- #endif
- #else
- #ifdef NBPC
- #define MALLOC_GETPAGESIZE NBPC
- #else
- #ifdef PAGESIZE
- #define MALLOC_GETPAGESIZE PAGESIZE
- #else /* just guess */
- #define MALLOC_GETPAGESIZE ((size_t)4096U)
- #endif
- #endif
- #endif
- #endif
- #endif
- #endif
- #endif
- #endif
-#endif
-#endif
-/*AMOD: For MALLOC_GETPAGESIZE*/
-
-/* ------------------- size_t and alignment properties -------------------- */
-
-/* The byte and bit size of a size_t */
-#define SIZE_T_SIZE (sizeof(size_t))
-#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
-
-/* Some constants coerced to size_t */
-/* Annoying but necessary to avoid errors on some platforms */
-#define SIZE_T_ZERO ((size_t)0)
-#define SIZE_T_ONE ((size_t)1)
-#define SIZE_T_TWO ((size_t)2)
-#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
-#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
-#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
-#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
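-/* Illustrative values on a 32-bit build (size_t of 4 bytes):
-   SIZE_T_SIZE == 4, SIZE_T_BITSIZE == 32, TWO_SIZE_T_SIZES == 8,
-   FOUR_SIZE_T_SIZES == 16, SIX_SIZE_T_SIZES == 24, HALF_MAX_SIZE_T == 0x7fffffff. */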
-
-/* The bit mask value corresponding to MALLOC_ALIGNMENT */
-#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
-
-/* True if address a has acceptable alignment */
-//#define IS_ALIGNED(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
-#define IS_ALIGNED(A) (((unsigned int)((A)) & (CHUNK_ALIGN_MASK)) == 0)
-
-/* the number of bytes to offset an address to align it */
-#define ALIGN_OFFSET(A)\
- ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
- ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
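-/* Example (assuming MALLOC_ALIGNMENT == 8): for an address with low bits 0x4,
-   ALIGN_OFFSET yields (8 - 4) & 7 == 4; for an already aligned address it
-   yields 0, so A + ALIGN_OFFSET(A) is always 8-aligned. */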
-
-/* -------------------------- MMAP preliminaries ------------------------- */
-
-/*
- If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
- checks to fail so compiler optimizer can delete code rather than
- using so many "#if"s.
-*/
-
-
-/* MORECORE and MMAP must return MFAIL on failure */
-#define MFAIL ((void*)(MAX_SIZE_T))
-#define CMFAIL ((TUint8*)(MFAIL)) /* defined for convenience */
-
-#if !HAVE_MMAP
- #define IS_MMAPPED_BIT (SIZE_T_ZERO)
- #define USE_MMAP_BIT (SIZE_T_ZERO)
- #define CALL_MMAP(s) MFAIL
- #define CALL_MUNMAP(a, s) (-1)
- #define DIRECT_MMAP(s) MFAIL
-#else /* !HAVE_MMAP */
- #define IS_MMAPPED_BIT (SIZE_T_ONE)
- #define USE_MMAP_BIT (SIZE_T_ONE)
- #ifndef WIN32
- #define CALL_MUNMAP(a, s) DLUMMAP((a),(s)) /*munmap((a), (s))*/
- #define MMAP_PROT (PROT_READ|PROT_WRITE)
- #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
- #define MAP_ANONYMOUS MAP_ANON
- #endif /* MAP_ANON */
- #ifdef MAP_ANONYMOUS
- #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
- #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, (int)MMAP_FLAGS, -1, 0)
- #else /* MAP_ANONYMOUS */
- /*
- Nearly all versions of mmap support MAP_ANONYMOUS, so the following
- is unlikely to be needed, but is supplied just in case.
- */
- #define MMAP_FLAGS (MAP_PRIVATE)
- //static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
- #define CALL_MMAP(s) DLMMAP(s)
- /*#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
- (dev_zero_fd = open("/dev/zero", O_RDWR), \
- mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
- mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
- */
- #define CALL_REMAP(a, s, d) DLREMAP((a),(s),(d))
- #endif /* MAP_ANONYMOUS */
- #define DIRECT_MMAP(s) CALL_MMAP(s)
- #else /* WIN32 */
- #define CALL_MMAP(s) win32mmap(s)
- #define CALL_MUNMAP(a, s) win32munmap((a), (s))
- #define DIRECT_MMAP(s) win32direct_mmap(s)
- #endif /* WIN32 */
-#endif /* HAVE_MMAP */
-
-#if HAVE_MMAP && HAVE_MREMAP
- #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
-#else /* HAVE_MMAP && HAVE_MREMAP */
- #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
-#endif /* HAVE_MMAP && HAVE_MREMAP */
-
-#if HAVE_MORECORE
- #define CALL_MORECORE(S) SetBrk(S)
-#else /* HAVE_MORECORE */
- #define CALL_MORECORE(S) MFAIL
-#endif /* HAVE_MORECORE */
-
-/* mstate bit set if contiguous morecore disabled or failed */
-#define USE_NONCONTIGUOUS_BIT (4U)
-
-/* segment bit set in create_mspace_with_base */
-#define EXTERN_BIT (8U)
-
-
-#if USE_LOCKS
-/*
- When locks are defined, there are up to two global locks:
- * If HAVE_MORECORE, iMorecoreMutex protects sequences of calls to
- MORECORE. In many cases sys_alloc requires two calls, that should
- not be interleaved with calls by other threads. This does not
- protect against direct calls to MORECORE by other threads not
- using this lock, so there is still code to cope as best we can with
- interference.
- * iMagicInitMutex ensures that mparams.iMagic and other
- unique mparams values are initialized only once.
-*/
- #ifndef WIN32
- /* By default use posix locks */
- #include <pthread.h>
- #define MLOCK_T pthread_mutex_t
- #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
- #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
- #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
-
- #if HAVE_MORECORE
- //static MLOCK_T iMorecoreMutex = PTHREAD_MUTEX_INITIALIZER;
- #endif /* HAVE_MORECORE */
- //static MLOCK_T iMagicInitMutex = PTHREAD_MUTEX_INITIALIZER;
- #else /* WIN32 */
- #define MLOCK_T long
- #define INITIAL_LOCK(l) *(l)=0
- #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
- #define RELEASE_LOCK(l) win32_release_lock(l)
- #if HAVE_MORECORE
- static MLOCK_T iMorecoreMutex;
- #endif /* HAVE_MORECORE */
- static MLOCK_T iMagicInitMutex;
- #endif /* WIN32 */
- #define USE_LOCK_BIT (2U)
-#else /* USE_LOCKS */
- #define USE_LOCK_BIT (0U)
- #define INITIAL_LOCK(l)
-#endif /* USE_LOCKS */
-
-#if USE_LOCKS && HAVE_MORECORE
- #define ACQUIRE_MORECORE_LOCK(M) ACQUIRE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
- #define RELEASE_MORECORE_LOCK(M) RELEASE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
-#else /* USE_LOCKS && HAVE_MORECORE */
- #define ACQUIRE_MORECORE_LOCK(M)
- #define RELEASE_MORECORE_LOCK(M)
-#endif /* USE_LOCKS && HAVE_MORECORE */
-
-#if USE_LOCKS
- /*Currently not supporting this*/
- #define ACQUIRE_MAGIC_INIT_LOCK(M) ACQUIRE_LOCK(((M)->iMagicInitMutex));
- //AMOD: changed #define ACQUIRE_MAGIC_INIT_LOCK()
- //#define RELEASE_MAGIC_INIT_LOCK()
- #define RELEASE_MAGIC_INIT_LOCK(M) RELEASE_LOCK(((M)->iMagicInitMutex));
-#else /* USE_LOCKS */
- #define ACQUIRE_MAGIC_INIT_LOCK(M)
- #define RELEASE_MAGIC_INIT_LOCK(M)
-#endif /* USE_LOCKS */
-
-/*CHUNK representation*/
-struct malloc_chunk {
- size_t iPrevFoot; /* Size of previous chunk (if free). */
- size_t iHead; /* Size and inuse bits. */
- struct malloc_chunk* iFd; /* double links -- used only if free. */
- struct malloc_chunk* iBk;
-};
-
-typedef struct malloc_chunk mchunk;
-typedef struct malloc_chunk* mchunkptr;
-typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
-typedef unsigned int bindex_t; /* Described below */
-typedef unsigned int binmap_t; /* Described below */
-typedef unsigned int flag_t; /* The type of various bit flag sets */
-
-
-/* ------------------- Chunks sizes and alignments ----------------------- */
-#define MCHUNK_SIZE (sizeof(mchunk))
-
-//#if FOOTERS
-// #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
-//#else /* FOOTERS */
-// #define CHUNK_OVERHEAD (SIZE_T_SIZE)
-//#endif /* FOOTERS */
-
-/* MMapped chunks need a second word of overhead ... */
-#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
-/* ... and additional padding for fake next-chunk at foot */
-#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
-
-/* The smallest size we can malloc is an aligned minimal chunk */
-#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
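-/* On a 32-bit build MCHUNK_SIZE == 16 (two size_t fields plus two pointers),
-   so with 8-byte alignment MIN_CHUNK_SIZE == (16 + 7) & ~7 == 16. */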
-
-/* conversion from malloc headers to user pointers, and back */
-#define CHUNK2MEM(p) ((void*)((TUint8*)(p) + TWO_SIZE_T_SIZES))
-#define MEM2CHUNK(mem) ((mchunkptr)((TUint8*)(mem) - TWO_SIZE_T_SIZES))
-/* chunk associated with aligned address A */
-#define ALIGN_AS_CHUNK(A) (mchunkptr)((A) + ALIGN_OFFSET(CHUNK2MEM(A)))
-
-/* Bounds on request (not chunk) sizes. */
-#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
-#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
-
-/* pad request bytes into a usable size */
-#define PAD_REQUEST(req) (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
-
-/* pad request, checking for minimum (but not maximum) */
-#define REQUEST2SIZE(req) (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(req))
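-/* Worked example (assuming CHUNK_OVERHEAD == SIZE_T_SIZE == 4 and 8-byte
-   alignment): MIN_REQUEST == 16 - 4 - 1 == 11, so REQUEST2SIZE(10) takes the
-   MIN_CHUNK_SIZE branch and returns 16, while REQUEST2SIZE(20) pads to
-   (20 + 4 + 7) & ~7 == 24. */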
-
-/* ------------------ Operations on iHead and foot fields ----------------- */
-
-/*
- The iHead field of a chunk is or'ed with PINUSE_BIT when previous
- adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
- use. If the chunk was obtained with mmap, the iPrevFoot field has
- IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
- mmapped region to the base of the chunk.
-*/
-#define PINUSE_BIT (SIZE_T_ONE)
-#define CINUSE_BIT (SIZE_T_TWO)
-#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
-
-/* Head value for fenceposts */
-#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
-
-/* extraction of fields from iHead words */
-#define CINUSE(p) ((p)->iHead & CINUSE_BIT)
-#define PINUSE(p) ((p)->iHead & PINUSE_BIT)
-#define CHUNKSIZE(p) ((p)->iHead & ~(INUSE_BITS))
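-/* Example iHead word: a free 24-byte chunk whose predecessor is in use has
-   iHead == 24 | PINUSE_BIT == 0x19, so CHUNKSIZE(p) == 24, PINUSE(p) is set
-   and CINUSE(p) is clear. */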
-
-#define CLEAR_PINUSE(p) ((p)->iHead &= ~PINUSE_BIT)
-#define CLEAR_CINUSE(p) ((p)->iHead &= ~CINUSE_BIT)
-
-/* Treat space at ptr +/- offset as a chunk */
-#define CHUNK_PLUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) + (s)))
-#define CHUNK_MINUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) - (s)))
-
-/* Ptr to next or previous physical malloc_chunk. */
-#define NEXT_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) + ((p)->iHead & ~INUSE_BITS)))
-#define PREV_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) - ((p)->iPrevFoot) ))
-
-/* extract next chunk's PINUSE bit */
-#define NEXT_PINUSE(p) ((NEXT_CHUNK(p)->iHead) & PINUSE_BIT)
-
-/* Get/set size at footer */
-#define GET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot)
-#define SET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = (s))
-
-/* Set size, PINUSE bit, and foot */
-#define SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s) ((p)->iHead = (s|PINUSE_BIT), SET_FOOT(p, s))
-
-/* Set size, PINUSE bit, foot, and clear next PINUSE */
-#define SET_FREE_WITH_PINUSE(p, s, n) (CLEAR_PINUSE(n), SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s))
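-/* Boundary-tag note: the two macros above record a freed chunk's size both in
-   its own iHead and in the iPrevFoot of the chunk s bytes later, and clear
-   that neighbour's PINUSE bit, which is what lets PREV_CHUNK walk backwards
-   when coalescing. */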
-
-#define IS_MMAPPED(p) (!((p)->iHead & PINUSE_BIT) && ((p)->iPrevFoot & IS_MMAPPED_BIT))
-
-/* Get the internal overhead associated with chunk p */
-#define OVERHEAD_FOR(p) (IS_MMAPPED(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
-
-/* Return true if malloced space is not necessarily cleared */
-#if MMAP_CLEARS
- #define CALLOC_MUST_CLEAR(p) (!IS_MMAPPED(p))
-#else /* MMAP_CLEARS */
- #define CALLOC_MUST_CLEAR(p) (1)
-#endif /* MMAP_CLEARS */
-
-/* ---------------------- Overlaid data structures ----------------------- */
-struct malloc_tree_chunk {
- /* The first four fields must be compatible with malloc_chunk */
- size_t iPrevFoot;
- size_t iHead;
- struct malloc_tree_chunk* iFd;
- struct malloc_tree_chunk* iBk;
-
- struct malloc_tree_chunk* iChild[2];
- struct malloc_tree_chunk* iParent;
- bindex_t iIndex;
-};
-
-typedef struct malloc_tree_chunk tchunk;
-typedef struct malloc_tree_chunk* tchunkptr;
-typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
-
-/* A little helper macro for trees */
-#define LEFTMOST_CHILD(t) ((t)->iChild[0] != 0? (t)->iChild[0] : (t)->iChild[1])
-/* Segment structure */
-//struct malloc_segment {
-// TUint8* iBase; /* base address */
-// size_t iSize; /* allocated size */
-//};
-
-#define IS_MMAPPED_SEGMENT(S) ((S)->iSflags & IS_MMAPPED_BIT)
-#define IS_EXTERN_SEGMENT(S) ((S)->iSflags & EXTERN_BIT)
-
-typedef struct malloc_segment msegment;
-typedef struct malloc_segment* msegmentptr;
-
-/* Malloc state data structure */
-
-//#define NSMALLBINS (32U)
-//#define NTREEBINS (32U)
-#define SMALLBIN_SHIFT (3U)
-#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
-#define TREEBIN_SHIFT (8U)
-#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
-#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
-#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
-
-/*struct malloc_state {
- binmap_t iSmallMap;
- binmap_t iTreeMap;
- size_t iDvSize;
- size_t iTopSize;
- mchunkptr iDv;
- mchunkptr iTop;
- size_t iTrimCheck;
- mchunkptr iSmallBins[(NSMALLBINS+1)*2];
- tbinptr iTreeBins[NTREEBINS];
- msegment iSeg;
- };*/
-/*
-struct malloc_state {
- binmap_t iSmallMap;
- binmap_t iTreeMap;
- size_t iDvSize;
- size_t iTopSize;
- TUint8* iLeastAddr;
- mchunkptr iDv;
- mchunkptr iTop;
- size_t iTrimCheck;
- size_t iMagic;
- mchunkptr iSmallBins[(NSMALLBINS+1)*2];
- tbinptr iTreeBins[NTREEBINS];
- size_t iFootprint;
- size_t iMaxFootprint;
- flag_t iMflags;
-#if USE_LOCKS
- MLOCK_T iMutex;
- MLOCK_T iMagicInitMutex;
- MLOCK_T iMorecoreMutex;
-#endif
- msegment iSeg;
-};
-*/
-typedef struct malloc_state* mstate;
-
-/* ------------- Global malloc_state and malloc_params ------------------- */
-
-/*
- malloc_params holds global properties, including those that can be
- dynamically set using mallopt. There is a single instance, mparams,
- initialized in init_mparams.
-*/
-
-struct malloc_params {
- size_t iMagic;
- size_t iPageSize;
- size_t iGranularity;
- size_t iMmapThreshold;
- size_t iTrimThreshold;
- flag_t iDefaultMflags;
-#if USE_LOCKS
- MLOCK_T iMagicInitMutex;
-#endif /* USE_LOCKS */
-};
-
-/* The global malloc_state used for all non-"mspace" calls */
-/* AMOD: needs checking, as this will be a member of the class */
-
-//static struct malloc_state _gm_;
-//#define GM (&_gm_)
-
-//#define IS_GLOBAL(M) ((M) == &_gm_)
-/*AMOD: has changed*/
-#define IS_GLOBAL(M) ((M) == GM)
-#define IS_INITIALIZED(M) ((M)->iTop != 0)
-
-/* -------------------------- system alloc setup ------------------------- */
-
-/* Operations on iMflags */
-
-#define USE_LOCK(M) ((M)->iMflags & USE_LOCK_BIT)
-#define ENABLE_LOCK(M) ((M)->iMflags |= USE_LOCK_BIT)
-#define DISABLE_LOCK(M) ((M)->iMflags &= ~USE_LOCK_BIT)
-
-#define USE_MMAP(M) ((M)->iMflags & USE_MMAP_BIT)
-#define ENABLE_MMAP(M) ((M)->iMflags |= USE_MMAP_BIT)
-#define DISABLE_MMAP(M) ((M)->iMflags &= ~USE_MMAP_BIT)
-
-#define USE_NONCONTIGUOUS(M) ((M)->iMflags & USE_NONCONTIGUOUS_BIT)
-#define DISABLE_CONTIGUOUS(M) ((M)->iMflags |= USE_NONCONTIGUOUS_BIT)
-
-#define SET_LOCK(M,L) ((M)->iMflags = (L)? ((M)->iMflags | USE_LOCK_BIT) : ((M)->iMflags & ~USE_LOCK_BIT))
-
-/* page-align a size */
-#define PAGE_ALIGN(S) (((S) + (mparams.iPageSize)) & ~(mparams.iPageSize - SIZE_T_ONE))
-
-/* iGranularity-align a size */
-#define GRANULARITY_ALIGN(S) (((S) + (mparams.iGranularity)) & ~(mparams.iGranularity - SIZE_T_ONE))
-
-#define IS_PAGE_ALIGNED(S) (((size_t)(S) & (mparams.iPageSize - SIZE_T_ONE)) == 0)
-#define IS_GRANULARITY_ALIGNED(S) (((size_t)(S) & (mparams.iGranularity - SIZE_T_ONE)) == 0)
-
-/* True if segment S holds address A */
-#define SEGMENT_HOLDS(S, A) ((TUint8*)(A) >= S->iBase && (TUint8*)(A) < S->iBase + S->iSize)
-
-#ifndef MORECORE_CANNOT_TRIM
- #define SHOULD_TRIM(M,s) ((s) > (M)->iTrimCheck)
-#else /* MORECORE_CANNOT_TRIM */
- #define SHOULD_TRIM(M,s) (0)
-#endif /* MORECORE_CANNOT_TRIM */
-
-/*
- TOP_FOOT_SIZE is padding at the end of a segment, including space
- that may be needed to place segment records and fenceposts when new
- noncontiguous segments are added.
-*/
-#define TOP_FOOT_SIZE (ALIGN_OFFSET(CHUNK2MEM(0))+PAD_REQUEST(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
-
-#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
-/* ------------------------------- Hooks -------------------------------- */
-
-/*
- PREACTION should be defined to return 0 on success, and nonzero on
- failure. If you are not using locking, you can redefine these to do
- anything you like.
-*/
-
-#if USE_LOCKS
- /* Ensure locks are initialized */
- #define GLOBALLY_INITIALIZE() (mparams.iPageSize == 0 && init_mparams())
- #define PREACTION(M) (USE_LOCK((M))?(ACQUIRE_LOCK((M)->iMutex),0):0) /*Action to take like lock before alloc*/
- #define POSTACTION(M) { if (USE_LOCK(M)) RELEASE_LOCK((M)->iMutex); }
-
-#else /* USE_LOCKS */
- #ifndef PREACTION
- #define PREACTION(M) (0)
- #endif /* PREACTION */
- #ifndef POSTACTION
- #define POSTACTION(M)
- #endif /* POSTACTION */
-#endif /* USE_LOCKS */
-
-/*
- CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
- USAGE_ERROR_ACTION is triggered on detected bad frees and
- reallocs. The argument p is an address that might have triggered the
- fault. It is ignored by the two predefined actions, but might be
- useful in custom actions that try to help diagnose errors.
-*/
-
-#if PROCEED_ON_ERROR
- /* A count of the number of corruption errors causing resets */
- int malloc_corruption_error_count;
- /* default corruption action */
- static void ResetOnError(mstate m);
- #define CORRUPTION_ERROR_ACTION(m) ResetOnError(m)
- #define USAGE_ERROR_ACTION(m, p)
-#else /* PROCEED_ON_ERROR */
- #ifndef CORRUPTION_ERROR_ACTION
- #define CORRUPTION_ERROR_ACTION(m) ABORT
- #endif /* CORRUPTION_ERROR_ACTION */
- #ifndef USAGE_ERROR_ACTION
- #define USAGE_ERROR_ACTION(m,p) ABORT
- #endif /* USAGE_ERROR_ACTION */
-#endif /* PROCEED_ON_ERROR */
-
-
-#ifdef _DEBUG
- #define CHECK_FREE_CHUNK(M,P) DoCheckFreeChunk(M,P)
- #define CHECK_INUSE_CHUNK(M,P) DoCheckInuseChunk(M,P)
- #define CHECK_TOP_CHUNK(M,P) DoCheckTopChunk(M,P)
- #define CHECK_MALLOCED_CHUNK(M,P,N) DoCheckMallocedChunk(M,P,N)
- #define CHECK_MMAPPED_CHUNK(M,P) DoCheckMmappedChunk(M,P)
- #define CHECK_MALLOC_STATE(M) DoCheckMallocState(M)
-#else /* DEBUG */
- #define CHECK_FREE_CHUNK(M,P)
- #define CHECK_INUSE_CHUNK(M,P)
- #define CHECK_MALLOCED_CHUNK(M,P,N)
- #define CHECK_MMAPPED_CHUNK(M,P)
- #define CHECK_MALLOC_STATE(M)
- #define CHECK_TOP_CHUNK(M,P)
-#endif /* DEBUG */
-
-/* ---------------------------- Indexing Bins ---------------------------- */
-
-#define IS_SMALL(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
-#define SMALL_INDEX(s) ((s) >> SMALLBIN_SHIFT)
-#define SMALL_INDEX2SIZE(i) ((i) << SMALLBIN_SHIFT)
-#define MIN_SMALL_INDEX (SMALL_INDEX(MIN_CHUNK_SIZE))
-
-/* addressing by index. See above about smallbin repositioning */
-#define SMALLBIN_AT(M, i) ((sbinptr)((TUint8*)&((M)->iSmallBins[(i)<<1])))
-#define TREEBIN_AT(M,i) (&((M)->iTreeBins[i]))
-
-
-/* Bit representing maximum resolved size in a treebin at i */
-#define BIT_FOR_TREE_INDEX(i) (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
-
-/* Shift placing maximum resolved bit in a treebin at i as sign bit */
-#define LEFTSHIFT_FOR_TREE_INDEX(i) ((i == NTREEBINS-1)? 0 : ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
-
-/* The size of the smallest chunk held in bin with index i */
-#define MINSIZE_FOR_TREE_INDEX(i) ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
-
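The treebin geometry these macros imply: with TREEBIN_SHIFT == 8, the bin minimum sizes run 256, 384, 512, 768, 1024, 1536, ... so bin widths double every second index. A small sketch mirroring MINSIZE_FOR_TREE_INDEX in plain C++:

@code
#include <cstdio>

static unsigned MinSizeForTreeIndex(unsigned i)    // mirrors MINSIZE_FOR_TREE_INDEX
    {
    unsigned shift = (i >> 1) + 8;                 // TREEBIN_SHIFT == 8
    return (1u << shift) | ((i & 1u) << (shift - 1));
    }

int main()
    {
    for (unsigned i = 0; i < 6; ++i)
        printf("%u ", MinSizeForTreeIndex(i));     // prints: 256 384 512 768 1024 1536
    printf("\n");
    }
@endcode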
-
-/* ------------------------ Operations on bin maps ----------------------- */
-/* bit corresponding to given index */
-#define IDX2BIT(i) ((binmap_t)(1) << (i))
-/* Mark/Clear bits with given index */
-#define MARK_SMALLMAP(M,i) ((M)->iSmallMap |= IDX2BIT(i))
-#define CLEAR_SMALLMAP(M,i) ((M)->iSmallMap &= ~IDX2BIT(i))
-#define SMALLMAP_IS_MARKED(M,i) ((M)->iSmallMap & IDX2BIT(i))
-#define MARK_TREEMAP(M,i) ((M)->iTreeMap |= IDX2BIT(i))
-#define CLEAR_TREEMAP(M,i) ((M)->iTreeMap &= ~IDX2BIT(i))
-#define TREEMAP_IS_MARKED(M,i) ((M)->iTreeMap & IDX2BIT(i))
-
- /* isolate the least set bit of a bitmap */
-#define LEAST_BIT(x) ((x) & -(x))
-
-/* mask with all bits to left of least bit of x on */
-#define LEFT_BITS(x) ((x<<1) | -(x<<1))
-
-/* mask with all bits to left of or equal to least bit of x on */
-#define SAME_OR_LEFT_BITS(x) ((x) | -(x))
-
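These masks are combined when the allocator looks for the first non-empty bin above a given index, as TmallocLarge and DlMalloc do further below: mask the bin map with LEFT_BITS(IDX2BIT(i)), then isolate the lowest candidate with LEAST_BIT. A sketch with an arbitrary map value (0u - x is used instead of -x only to sidestep the unsigned-negation warning discussed later for MSVC):

@code
#include <cstdio>

int main()
    {
    unsigned map  = 0x28u;                 // bins 3 and 5 are non-empty
    unsigned idx  = 2;                     // want the first non-empty bin above 2
    unsigned x    = 1u << (idx + 1);       // IDX2BIT(idx) << 1
    unsigned left = x | (0u - x);          // LEFT_BITS: all bits left of bit idx
    unsigned cand = map & left;            // candidate bins above idx
    unsigned hit  = cand & (0u - cand);    // LEAST_BIT: the lowest such bin
    printf("0x%x\n", hit);                 // prints: 0x8  (bin 3)
    }
@endcode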
-#if !INSECURE
- /* Check if address a is at least as high as any from MORECORE or MMAP */
- #define OK_ADDRESS(M, a) ((TUint8*)(a) >= (M)->iLeastAddr)
- /* Check if address of next chunk n is higher than base chunk p */
- #define OK_NEXT(p, n) ((TUint8*)(p) < (TUint8*)(n))
- /* Check if p has its CINUSE bit on */
- #define OK_CINUSE(p) CINUSE(p)
- /* Check if p has its PINUSE bit on */
- #define OK_PINUSE(p) PINUSE(p)
-#else /* !INSECURE */
- #define OK_ADDRESS(M, a) (1)
- #define OK_NEXT(b, n) (1)
- #define OK_CINUSE(p) (1)
- #define OK_PINUSE(p) (1)
-#endif /* !INSECURE */
-
-#if (FOOTERS && !INSECURE)
- /* Check if (alleged) mstate m has expected iMagic field */
- #define OK_MAGIC(M) ((M)->iMagic == mparams.iMagic)
-#else /* (FOOTERS && !INSECURE) */
- #define OK_MAGIC(M) (1)
-#endif /* (FOOTERS && !INSECURE) */
-
-/* In gcc, use __builtin_expect to minimize impact of checks */
-#if !INSECURE
- #if defined(__GNUC__) && __GNUC__ >= 3
- #define RTCHECK(e) __builtin_expect(e, 1)
- #else /* GNUC */
- #define RTCHECK(e) (e)
- #endif /* GNUC */
-
-#else /* !INSECURE */
- #define RTCHECK(e) (1)
-#endif /* !INSECURE */
-/* macros to set up inuse chunks with or without footers */
-#if !FOOTERS
- #define MARK_INUSE_FOOT(M,p,s)
- /* Set CINUSE bit and PINUSE bit of next chunk */
- #define SET_INUSE(M,p,s) ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
- /* Set CINUSE and PINUSE of this chunk and PINUSE of next chunk */
- #define SET_INUSE_AND_PINUSE(M,p,s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
- /* Set size, CINUSE and PINUSE bit of this chunk */
- #define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT))
-#else /* FOOTERS */
- /* Set foot of inuse chunk to be xor of mstate and seed */
- #define MARK_INUSE_FOOT(M,p,s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = ((size_t)(M) ^ mparams.iMagic))
- #define GET_MSTATE_FOR(p) ((mstate)(((mchunkptr)((TUint8*)(p)+(CHUNKSIZE(p))))->iPrevFoot ^ mparams.iMagic))
- #define SET_INUSE(M,p,s)\
- ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),\
- (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT), \
- MARK_INUSE_FOOT(M,p,s))
- #define SET_INUSE_AND_PINUSE(M,p,s)\
- ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
- (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT),\
- MARK_INUSE_FOOT(M,p,s))
- #define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s)\
- ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
- MARK_INUSE_FOOT(M, p, s))
-#endif /* !FOOTERS */
-
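The FOOTERS variant works because xor is its own inverse: MARK_INUSE_FOOT stores (mstate ^ mparams.iMagic) in the next chunk's iPrevFoot, and GET_MSTATE_FOR xors with iMagic again to recover the owning mstate. A minimal round-trip check (both values below are made up for illustration):

@code
#include <cstdio>

int main()
    {
    unsigned m     = 0x00483abcu;          // pretend mstate address
    unsigned magic = 0x585858a5u;          // pretend mparams.iMagic
    unsigned foot  = m ^ magic;            // what MARK_INUSE_FOOT would store
    printf("%d\n", (foot ^ magic) == m);   // GET_MSTATE_FOR recovers m: prints 1
    }
@endcode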
-
-#if ONLY_MSPACES
-#define INTERNAL_MALLOC(m, b) mspace_malloc(m, b)
-#define INTERNAL_FREE(m, mem) mspace_free(m,mem);
-#else /* ONLY_MSPACES */
- #if MSPACES
- #define INTERNAL_MALLOC(m, b) (m == GM)? dlmalloc(b) : mspace_malloc(m, b)
- #define INTERNAL_FREE(m, mem) if (m == GM) dlfree(mem); else mspace_free(m,mem);
- #else /* MSPACES */
- #define INTERNAL_MALLOC(m, b) dlmalloc(b)
- #define INTERNAL_FREE(m, mem) dlfree(mem)
- #endif /* MSPACES */
-#endif /* ONLY_MSPACES */
-
- #ifndef NDEBUG
- #define CHECKING 1
- #endif
-// #define HYSTERESIS 4
- #define HYSTERESIS 1
- #define HYSTERESIS_BYTES (2*PAGESIZE)
- #define HYSTERESIS_GROW (HYSTERESIS*PAGESIZE)
-
- #if CHECKING
- #define CHECK(x) x
- #else
- #undef ASSERT
- #define ASSERT(x) (void)0
- #define CHECK(x) (void)0
- #endif
-
-#endif/*__DLA__*/
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#include "qt_hybridheap_symbian_p.h"
-
-#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
-
-// If non-zero, this causes the iSlabs to be configured only when the chunk size exceeds this level
-#define DELAYED_SLAB_THRESHOLD (64*1024)		// 64KB seems about right based on trace data
-#define SLAB_CONFIG 0x3fff		// Use all slab sizes 4,8..56 bytes. This is more efficient for large heaps, as Qt tends to have many small allocations
-
-#ifdef _DEBUG
-#define __SIMULATE_ALLOC_FAIL(s) if (CheckForSimulatedAllocFail()) {s}
-#define __ALLOC_DEBUG_HEADER(s) (s += EDebugHdrSize)
-#define __SET_DEBUG_DATA(p,n,c) (((SDebugCell*)(p))->nestingLevel = (n), ((SDebugCell*)(p))->allocCount = (c))
-#define __GET_USER_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) + EDebugHdrSize : NULL)
-#define __GET_DEBUG_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) - EDebugHdrSize : NULL)
-#define __ZAP_CELL(p) memset( (TUint8*)p, 0xde, (AllocLen(__GET_USER_DATA_BFR(p))+EDebugHdrSize))
-#define __DEBUG_SAVE(p) TInt dbgNestLevel = ((SDebugCell*)p)->nestingLevel
-#define __DEBUG_RESTORE(p) if (p) {((SDebugCell*)p)->nestingLevel = dbgNestLevel;}
-#define __DEBUG_HDR_SIZE EDebugHdrSize
-#define __REMOVE_DBG_HDR(n) (n*EDebugHdrSize)
-#define __GET_AVAIL_BLOCK_SIZE(s) ( (s<EDebugHdrSize) ? 0 : s-EDebugHdrSize )
-#define __UPDATE_ALLOC_COUNT(o,n,c) if (o!=n && n) {((SDebugCell*)n)->allocCount = (c);}
-#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
-#define __INCREMENT_COUNTERS(p) iCellCount++, iTotalAllocSize += AllocLen(p)
-#define __DECREMENT_COUNTERS(p) iCellCount--, iTotalAllocSize -= AllocLen(p)
-#define __UPDATE_TOTAL_ALLOC(p,s) iTotalAllocSize += (AllocLen(__GET_USER_DATA_BFR(p)) - s)
-
-#else
-#define __SIMULATE_ALLOC_FAIL(s)
-#define __ALLOC_DEBUG_HEADER(s)
-#define __SET_DEBUG_DATA(p,n,c)
-#define __GET_USER_DATA_BFR(p) (p)
-#define __GET_DEBUG_DATA_BFR(p) (p)
-#define __ZAP_CELL(p)
-#define __DEBUG_SAVE(p)
-#define __DEBUG_RESTORE(p)
-#define __DEBUG_HDR_SIZE 0
-#define __REMOVE_DBG_HDR(n) 0
-#define __GET_AVAIL_BLOCK_SIZE(s) (s)
-#define __UPDATE_ALLOC_COUNT(o,n,c)
-#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
-#define __INCREMENT_COUNTERS(p)
-#define __DECREMENT_COUNTERS(p)
-#define __UPDATE_TOTAL_ALLOC(p,s)
-
-#endif
-
-
-#define MEMORY_MONITORED (iFlags & EMonitorMemory)
-#define GM (&iGlobalMallocState)
-#define IS_FIXED_HEAP (iFlags & EFixedSize)
-#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
-#define __POWER_OF_2(x) (!((x)&((x)-1)))
-
-#define __DL_BFR_CHECK(M,P) \
- if ( MEMORY_MONITORED ) \
- if ( !IS_ALIGNED(P) || ((TUint8*)(P)<M->iSeg.iBase) || ((TUint8*)(P)>(M->iSeg.iBase+M->iSeg.iSize))) \
- BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress); \
- else DoCheckInuseChunk(M, MEM2CHUNK(P))
-
-#ifndef __KERNEL_MODE__
-
-#define __SLAB_BFR_CHECK(S,P,B) \
- if ( MEMORY_MONITORED ) \
- if ( ((TUint32)P & 0x3) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
- BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)S), HEAP_PANIC(ETHeapBadCellAddress); \
- else DoCheckSlab(S, EPartialFullSlab, P), BuildPartialSlabBitmap(B,S,P)
-#define __PAGE_BFR_CHECK(P) \
- if ( MEMORY_MONITORED ) \
- if ( ((TUint32)P & ((1 << iPageSize)-1)) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
- BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress)
-
-#endif
-
-#ifdef _MSC_VER
-// This is required while we are still using VC6 to compile, to avoid warnings that cannot be fixed
-// without editing the original Doug Lea source. The 4146 warnings are due to the original code's
-// liking for negating unsigned numbers, and the 4127 warnings are due to the original code using the
-// RTCHECK macro with values that are always defined as 1. It is better to turn these warnings off
-// than to introduce diffs between the original Doug Lea implementation and our adaptation of it.
-#pragma warning( disable : 4146 ) /* unary minus operator applied to unsigned type, result still unsigned */
-#pragma warning( disable : 4127 ) /* conditional expression is constant */
-#endif // _MSC_VER
-
-
-/**
-@SYMPatchable
-@publishedPartner
-@released
-
-Defines the minimum cell size of a heap.
-
-The constant can be changed at ROM build time using the patchdata OBY keyword.
-
-@deprecated Patching this constant no longer has any effect.
-*/
-#ifdef __X86GCC__ // For X86GCC we don't use the proper data import attribute
-#undef IMPORT_D // since the constants are not really imported. GCC doesn't
-#define IMPORT_D // allow imports from self.
-#endif
-IMPORT_D extern const TInt KHeapMinCellSize;
-
-/**
-@SYMPatchable
-@publishedPartner
-@released
-
-This constant defines the ratio that determines the amount of hysteresis between heap growing and heap
-shrinking.
-It is a 32-bit fixed-point number with the radix point between bits 7 and 8
-(where the LSB is bit 0), i.e. in standard notation a Q8 (fx24.8) fixed-point
-number. For example, for a ratio of 2.0, set KHeapShrinkHysRatio=0x200.
-
-The heap shrinking hysteresis value is calculated to be:
-@code
-KHeapShrinkHysRatio*(iGrowBy>>8)
-@endcode
-where iGrowBy is a page-aligned value set by the aGrowBy argument to the RHeap constructor.
-The default hysteresis value is iGrowBy bytes, i.e. KHeapShrinkHysRatio=2.0.
-
-Memory usage may be improved by reducing the heap shrinking hysteresis
-by setting 1.0 < KHeapShrinkHysRatio < 2.0. Heap shrinking hysteresis is disabled/removed
-when KHeapShrinkHysRatio <= 1.0.
-
-The constant can be changed at ROM build time using the patchdata OBY keyword.
-*/
-IMPORT_D extern const TInt KHeapShrinkHysRatio;
-
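A worked example of the Q8 arithmetic above, matching the iTrimCheck computation in InitTop further below (both inputs are assumed values for illustration): with iGrowBy == 0x1000 (one 4KB page) and KHeapShrinkHysRatio == 0x300 (a ratio of 3.0), the trim threshold becomes 0x300 * (0x1000 >> 8) == 0x3000, i.e. three pages.

@code
#include <cstdio>

int main()
    {
    int ratio   = 0x300;                          // assumed patchdata value: 3.0 in Q8
    int iGrowBy = 0x1000;                         // assumed page-aligned grow increment
    printf("0x%x\n", ratio * (iGrowBy >> 8));     // prints: 0x3000
    }
@endcode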
-UEXPORT_C TInt RHeap::AllocLen(const TAny* aCell) const
-{
- const MAllocator* m = this;
- return m->AllocLen(aCell);
-}
-
-UEXPORT_C TAny* RHeap::Alloc(TInt aSize)
-{
- const MAllocator* m = this;
- return ((MAllocator*)m)->Alloc(aSize);
-}
-
-UEXPORT_C void RHeap::Free(TAny* aCell)
-{
- const MAllocator* m = this;
- ((MAllocator*)m)->Free(aCell);
-}
-
-UEXPORT_C TAny* RHeap::ReAlloc(TAny* aCell, TInt aSize, TInt aMode)
-{
- const MAllocator* m = this;
- return ((MAllocator*)m)->ReAlloc(aCell, aSize, aMode);
-}
-
-UEXPORT_C TInt RHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
-{
- const MAllocator* m = this;
- return ((MAllocator*)m)->DebugFunction(aFunc, a1, a2);
-}
-
-UEXPORT_C TInt RHeap::Extension_(TUint aExtensionId, TAny*& a0, TAny* a1)
-{
- const MAllocator* m = this;
- return ((MAllocator*)m)->Extension_(aExtensionId, a0, a1);
-}
-
-#ifndef __KERNEL_MODE__
-
-EXPORT_C TInt RHeap::AllocSize(TInt& aTotalAllocSize) const
-{
- const MAllocator* m = this;
- return m->AllocSize(aTotalAllocSize);
-}
-
-EXPORT_C TInt RHeap::Available(TInt& aBiggestBlock) const
-{
- const MAllocator* m = this;
- return m->Available(aBiggestBlock);
-}
-
-EXPORT_C void RHeap::Reset()
-{
- const MAllocator* m = this;
- ((MAllocator*)m)->Reset();
-}
-
-EXPORT_C TInt RHeap::Compress()
-{
- const MAllocator* m = this;
- return ((MAllocator*)m)->Compress();
-}
-#endif
-
-RHybridHeap::RHybridHeap()
- {
- // This initialisation cannot be done in RHeap() for compatibility reasons
- iMaxLength = iChunkHandle = iNestingLevel = 0;
- iTop = NULL;
- iFailType = ENone;
- iTestData = NULL;
- }
-
-void RHybridHeap::operator delete(TAny*, TAny*)
-/**
-Called if the constructor invoked by operator new(TUint aSize, TAny* aBase) throws an exception.
-This is a dummy implementation, as the corresponding operator new does not allocate memory.
-*/
-{}
-
-
-#ifndef __KERNEL_MODE__
-void RHybridHeap::Lock() const
- /**
- @internalComponent
-*/
- {((RFastLock&)iLock).Wait();}
-
-
-void RHybridHeap::Unlock() const
- /**
- @internalComponent
-*/
- {((RFastLock&)iLock).Signal();}
-
-
-TInt RHybridHeap::ChunkHandle() const
- /**
- @internalComponent
-*/
-{
- return iChunkHandle;
-}
-
-#else
-//
-// This method is implemented in kheap.cpp
-//
-//void RHybridHeap::Lock() const
- /**
- @internalComponent
-*/
-// {;}
-
-
-
-//
-// This method is implemented in kheap.cpp
-//
-//void RHybridHeap::Unlock() const
- /**
- @internalComponent
-*/
-// {;}
-
-
-TInt RHybridHeap::ChunkHandle() const
- /**
- @internalComponent
-*/
-{
- return 0;
-}
-#endif
-
-RHybridHeap::RHybridHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust)
-/**
-Constructor for a non-fixed heap. Unlike the fixed heap, this heap is flexible in its minimum and
-maximum lengths, and it can use the hybrid allocator if all of its requirements are met.
-*/
- : iOffset(aOffset), iChunkSize(aMinLength)
- {
- __ASSERT_ALWAYS(iOffset>=0, HEAP_PANIC(ETHeapNewBadOffset));
-
- iChunkHandle = aChunkHandle;
- iMinLength = aMinLength;
- iMaxLength = aMaxLength;
-
-	// If the user has explicitly specified 0 as the aGrowBy value, set it to 1 so that it will be rounded up to the nearest page size
- if (aGrowBy == 0)
- aGrowBy = 1;
- GET_PAGE_SIZE(iPageSize);
- iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
-
- Construct(aSingleThread, aDLOnly, aUseAdjust, aAlign);
- }
-
-RHybridHeap::RHybridHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
-/**
-Constructor for a fixed heap. This heap has fixed (and equal) minimum and maximum lengths, cannot grow,
-and uses only the DL allocator.
-*/
- : iOffset(0), iChunkSize(aMaxLength)
- {
- iChunkHandle = NULL;
- iMinLength = aMaxLength;
- iMaxLength = aMaxLength;
- iGrowBy = 0;
-
- Construct(aSingleThread, ETrue, ETrue, aAlign);
- }
-
-TAny* RHybridHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
-{
- __ASSERT_ALWAYS(aSize>=sizeof(RHybridHeap), HEAP_PANIC(ETHeapNewBadSize));
- RHybridHeap* h = (RHybridHeap*)aBase;
- h->iBase = ((TUint8*)aBase) + aSize;
- return aBase;
-}
-
-void RHybridHeap::Construct(TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust, TInt aAlign)
-{
- iAlign = aAlign ? aAlign : RHybridHeap::ECellAlignment;
- __ASSERT_ALWAYS((TUint32)iAlign>=sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));
-
- // This initialisation cannot be done in RHeap() for compatibility reasons
- iTop = NULL;
- iFailType = ENone;
- iNestingLevel = 0;
- iTestData = NULL;
-
- iHighWaterMark = iMinLength;
- iAllocCount = 0;
- iFlags = aSingleThread ? ESingleThreaded : 0;
- iGrowBy = _ALIGN_UP(iGrowBy, iPageSize);
-
- if ( iMinLength == iMaxLength )
- {
- iFlags |= EFixedSize;
- aDLOnly = ETrue;
- }
-#ifndef __KERNEL_MODE__
-#ifdef DELAYED_SLAB_THRESHOLD
- iSlabInitThreshold = DELAYED_SLAB_THRESHOLD;
-#else
- iSlabInitThreshold = 0;
-#endif // DELAYED_SLAB_THRESHOLD
- iUseAdjust = aUseAdjust;
- iDLOnly = aDLOnly;
-#else
- (void)aUseAdjust;
-#endif
- // Initialise suballocators
- // if DL only is required then it cannot allocate slab or page memory
- // so these sub-allocators should be disabled. Otherwise initialise with default values
- if ( aDLOnly )
- {
- Init(0, 0);
- }
- else
- {
- Init(SLAB_CONFIG, 16);
- }
-
-#ifdef ENABLE_BTRACE
-
- TUint32 traceData[4];
- traceData[0] = iMinLength;
- traceData[1] = iMaxLength;
- traceData[2] = iGrowBy;
- traceData[3] = iAlign;
- BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 11, traceData, sizeof(traceData));
-#endif
-
-}
-
-#ifndef __KERNEL_MODE__
-TInt RHybridHeap::ConstructLock(TUint32 aMode)
-{
- TBool duplicateLock = EFalse;
- TInt r = KErrNone;
- if (!(iFlags & ESingleThreaded))
- {
- duplicateLock = aMode & UserHeap::EChunkHeapSwitchTo;
- r = iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess);
- if( r != KErrNone)
- {
- iChunkHandle = 0;
- return r;
- }
- }
-
- if ( aMode & UserHeap::EChunkHeapSwitchTo )
- User::SwitchHeap(this);
-
- iHandles = &iChunkHandle;
- if (!(iFlags & ESingleThreaded))
- {
- // now change the thread-relative chunk/semaphore handles into process-relative handles
- iHandleCount = 2;
- if(duplicateLock)
- {
- RHandleBase s = iLock;
- r = iLock.Duplicate(RThread());
- s.Close();
- }
- if (r==KErrNone && (aMode & UserHeap::EChunkHeapDuplicate))
- {
- r = ((RChunk*)&iChunkHandle)->Duplicate(RThread());
- if (r!=KErrNone)
- iLock.Close(), iChunkHandle=0;
- }
- }
- else
- {
- iHandleCount = 1;
- if (aMode & UserHeap::EChunkHeapDuplicate)
- r = ((RChunk*)&iChunkHandle)->Duplicate(RThread(), EOwnerThread);
- }
-
- return r;
-}
-#endif
-
-void RHybridHeap::Init(TInt aBitmapSlab, TInt aPagePower)
-{
-	/* Moved code which does initialisation */
- iTop = (TUint8*)this + iMinLength;
- iBase = Ceiling(iBase, ECellAlignment); // Align iBase address
-
- __INIT_COUNTERS(0);
- // memset(&mparams,0,sizeof(mparams));
-
- InitDlMalloc(iTop - iBase, 0);
-
-#ifndef __KERNEL_MODE__
- SlabInit();
- iSlabConfigBits = aBitmapSlab;
- if ( iChunkSize > iSlabInitThreshold )
- {
- iSlabInitThreshold = KMaxTInt32;
- SlabConfig(aBitmapSlab); // Delayed slab configuration done
- }
- if ( aPagePower )
- {
- RChunk chunk;
- chunk.SetHandle(iChunkHandle);
- iMemBase = chunk.Base(); // Store base address for paged allocator
- }
-
-	/* aPagePower: 10->1K, 11->2K, 12->4K, 13->8K, 14->16K, 15->32K, 16->64K */
- PagedInit(aPagePower);
-
-#ifdef ENABLE_BTRACE
- TUint32 traceData[3];
- traceData[0] = aBitmapSlab;
- traceData[1] = aPagePower;
- traceData[2] = GM->iTrimCheck;
- BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 0, traceData, sizeof(traceData));
-#endif
-#else
- (void)aBitmapSlab;
- (void)aPagePower;
-#endif // __KERNEL_MODE__
-
-}
-
-
-TInt RHybridHeap::AllocLen(const TAny* aCell) const
-{
- aCell = __GET_DEBUG_DATA_BFR(aCell);
-
- if (PtrDiff(aCell, this) >= 0)
- {
- mchunkptr m = MEM2CHUNK(aCell);
- return CHUNKSIZE(m) - OVERHEAD_FOR(m) - __DEBUG_HDR_SIZE;
- }
-#ifndef __KERNEL_MODE__
- if ( aCell )
- {
- if (LowBits(aCell, iPageSize) )
- return SlabHeaderSize(slab::SlabFor(aCell)->iHeader) - __DEBUG_HDR_SIZE;
-
- return PagedSize((void*)aCell) - __DEBUG_HDR_SIZE;
- }
-#endif
- return 0; // NULL pointer situation, should PANIC !!
-}
-
-#ifdef __KERNEL_MODE__
-TAny* RHybridHeap::Alloc(TInt aSize)
-{
- __CHECK_THREAD_STATE;
- __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
- __SIMULATE_ALLOC_FAIL(return NULL;)
- Lock();
- __ALLOC_DEBUG_HEADER(aSize);
- TAny* addr = DlMalloc(aSize);
- if ( addr )
- {
-// iCellCount++;
- __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
- addr = __GET_USER_DATA_BFR(addr);
- __INCREMENT_COUNTERS(addr);
- memclr(addr, AllocLen(addr));
- }
- Unlock();
-#ifdef ENABLE_BTRACE
- if (iFlags & ETraceAllocs)
- {
- if ( addr )
- {
- TUint32 traceData[3];
- traceData[0] = AllocLen(addr);
- traceData[1] = aSize - __DEBUG_HDR_SIZE;
- traceData[2] = 0;
- BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
- }
- else
- BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
- }
-#endif
- return addr;
-}
-#else
-
-TAny* RHybridHeap::Alloc(TInt aSize)
-{
- __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
- __SIMULATE_ALLOC_FAIL(return NULL;)
-
- TAny* addr;
-#ifdef ENABLE_BTRACE
- TInt aSubAllocator=0;
-#endif
-
- Lock();
-
- __ALLOC_DEBUG_HEADER(aSize);
-
- if (aSize < iSlabThreshold)
- {
- TInt ix = iSizeMap[(aSize+3)>>2];
- HEAP_ASSERT(ix != 0xff);
- addr = SlabAllocate(iSlabAlloc[ix]);
- if ( !addr )
- { // Slab allocation has failed, try to allocate from DL
- addr = DlMalloc(aSize);
- }
-#ifdef ENABLE_BTRACE
- else
- aSubAllocator=1;
-#endif
-	}
-	else if ((aSize >> iPageThreshold) == 0)
- {
- addr = DlMalloc(aSize);
- }
- else
- {
- addr = PagedAllocate(aSize);
- if ( !addr )
- { // Page allocation has failed, try to allocate from DL
- addr = DlMalloc(aSize);
- }
-#ifdef ENABLE_BTRACE
- else
- aSubAllocator=2;
-#endif
- }
-
- if ( addr )
- {
-// iCellCount++;
- __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
- addr = __GET_USER_DATA_BFR(addr);
- __INCREMENT_COUNTERS(addr);
- }
- Unlock();
-
-#ifdef ENABLE_BTRACE
- if (iFlags & ETraceAllocs)
- {
- if ( addr )
- {
- TUint32 traceData[3];
- traceData[0] = AllocLen(addr);
- traceData[1] = aSize - __DEBUG_HDR_SIZE;
- traceData[2] = aSubAllocator;
- BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
- }
- else
- BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
- }
-#endif
-
- return addr;
-}
-#endif // __KERNEL_MODE__
-
-#ifndef __KERNEL_MODE__
-TInt RHybridHeap::Compress()
-{
- if ( IS_FIXED_HEAP )
- return 0;
-
- Lock();
- TInt Reduced = SysTrim(GM, 0);
- if (iSparePage)
- {
- Unmap(iSparePage, iPageSize);
- iSparePage = 0;
- Reduced += iPageSize;
- }
- Unlock();
- return Reduced;
-}
-#endif
-
-void RHybridHeap::Free(TAny* aPtr)
-{
- __CHECK_THREAD_STATE;
- if ( !aPtr )
- return;
-#ifdef ENABLE_BTRACE
- TInt aSubAllocator=0;
-#endif
- Lock();
-
- aPtr = __GET_DEBUG_DATA_BFR(aPtr);
-
-#ifndef __KERNEL_MODE__
- if (PtrDiff(aPtr, this) >= 0)
- {
-#endif
- __DL_BFR_CHECK(GM, aPtr);
- __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
- __ZAP_CELL(aPtr);
- DlFree( aPtr);
-#ifndef __KERNEL_MODE__
- }
-
- else if ( LowBits(aPtr, iPageSize) == 0 )
- {
-#ifdef ENABLE_BTRACE
- aSubAllocator = 2;
-#endif
- __PAGE_BFR_CHECK(aPtr);
- __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
- PagedFree(aPtr);
- }
- else
- {
-#ifdef ENABLE_BTRACE
- aSubAllocator = 1;
-#endif
- TUint32 bm[4];
- __SLAB_BFR_CHECK(slab::SlabFor(aPtr),aPtr,bm);
- __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
- __ZAP_CELL(aPtr);
- SlabFree(aPtr);
- }
-#endif // __KERNEL_MODE__
-// iCellCount--;
- Unlock();
-#ifdef ENABLE_BTRACE
- if (iFlags & ETraceAllocs)
- {
- TUint32 traceData;
- traceData = aSubAllocator;
- BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)__GET_USER_DATA_BFR(aPtr), &traceData, sizeof(traceData));
- }
-#endif
-}
-
-#ifndef __KERNEL_MODE__
-void RHybridHeap::Reset()
-/**
-Frees all allocated cells on this heap.
-*/
-{
- Lock();
- if ( !IS_FIXED_HEAP )
- {
- if ( GM->iSeg.iSize > (iMinLength - sizeof(*this)) )
- Unmap(GM->iSeg.iBase + (iMinLength - sizeof(*this)), (GM->iSeg.iSize - (iMinLength - sizeof(*this))));
- ResetBitmap();
- if ( !iDLOnly )
- Init(iSlabConfigBits, iPageThreshold);
- else
- Init(0,0);
- }
- else Init(0,0);
- Unlock();
-}
-#endif
-
-TAny* RHybridHeap::ReAllocImpl(TAny* aPtr, TInt aSize, TInt aMode)
-{
- // First handle special case of calling reallocate with NULL aPtr
- if (!aPtr)
- {
- if (( aMode & ENeverMove ) == 0 )
- {
- aPtr = Alloc(aSize - __DEBUG_HDR_SIZE);
- aPtr = __GET_DEBUG_DATA_BFR(aPtr);
- }
- return aPtr;
- }
-
- TInt oldsize = AllocLen(__GET_USER_DATA_BFR(aPtr)) + __DEBUG_HDR_SIZE;
-
-	// Insist on geometric growth when reallocating memory; this reduces the copying and fragmentation
-	// generated during arithmetic growth of buffer/array/vector memory.
-	// Experiments have shown that 25% is a good threshold for this policy.
- if (aSize <= oldsize)
- {
- if (aSize >= oldsize - (oldsize>>2))
- return aPtr; // don't change if >75% original size
- }
- else
- {
- __SIMULATE_ALLOC_FAIL(return NULL;)
- if (aSize < oldsize + (oldsize>>2))
- {
- aSize = _ALIGN_UP(oldsize + (oldsize>>2), 4); // grow to at least 125% original size
- }
- }
- __DEBUG_SAVE(aPtr);
-
- TAny* newp;
-#ifdef __KERNEL_MODE__
- Lock();
- __DL_BFR_CHECK(GM, aPtr);
- newp = DlRealloc(aPtr, aSize, aMode);
- Unlock();
- if ( newp )
- {
- if ( aSize > oldsize )
- memclr(((TUint8*)newp) + oldsize, (aSize-oldsize)); // Buffer has grown in place, clear extra
- __DEBUG_RESTORE(newp);
- __UPDATE_ALLOC_COUNT(aPtr, newp, ++iAllocCount);
- __UPDATE_TOTAL_ALLOC(newp, oldsize);
- }
-#else
- // Decide how to reallocate based on (a) the current cell location, (b) the mode requested and (c) the new size
- if ( PtrDiff(aPtr, this) >= 0 )
- { // current cell in Doug Lea iArena
- if ( (aMode & ENeverMove)
- ||
- (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
- ||
- ((aSize >= iSlabThreshold) && ((aSize >> iPageThreshold) == 0)) )
- {
- Lock();
- __DL_BFR_CHECK(GM, aPtr);
- newp = DlRealloc(aPtr, aSize, aMode); // old and new in DL allocator
- Unlock();
- __DEBUG_RESTORE(newp);
- __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
- __UPDATE_TOTAL_ALLOC(newp, oldsize);
- return newp;
- }
- }
- else if (LowBits(aPtr, iPageSize) == 0)
- { // current cell in paged iArena
- if ( (aMode & ENeverMove)
- ||
- (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
- ||
- ((aSize >> iPageThreshold) != 0) )
- {
- Lock();
- __PAGE_BFR_CHECK(aPtr);
- newp = PagedReallocate(aPtr, aSize, aMode); // old and new in paged allocator
- Unlock();
- __DEBUG_RESTORE(newp);
- __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
- __UPDATE_TOTAL_ALLOC(newp, oldsize);
- return newp;
- }
- }
- else
- { // current cell in slab iArena
- TUint32 bm[4];
- Lock();
- __SLAB_BFR_CHECK(slab::SlabFor(aPtr), aPtr, bm);
- Unlock();
- if ( aSize <= oldsize)
- return aPtr;
- if (aMode & ENeverMove)
- return NULL; // cannot grow in slab iArena
- // just use alloc/copy/free...
- }
-
- // fallback to allocate and copy
- // shouldn't get here if we cannot move the cell
- // __ASSERT(mode == emobile || (mode==efixshrink && size>oldsize));
-
- newp = Alloc(aSize - __DEBUG_HDR_SIZE);
- newp = __GET_DEBUG_DATA_BFR(newp);
- if (newp)
- {
- memcpy(newp, aPtr, oldsize<aSize ? oldsize : aSize);
- __DEBUG_RESTORE(newp);
- Free(__GET_USER_DATA_BFR(aPtr));
- }
-
-#endif // __KERNEL_MODE__
- return newp;
-}
-
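The effect of the 25% hysteresis in ReAllocImpl, isolated into a sketch (hypothetical helper, with the same thresholds and 4-byte rounding as the code above): a 100-byte cell is left in place for any requested size in (75, 100], and a request for 110 is rounded up to 128, so a sequence of small grows costs O(log n) real reallocations instead of O(n).

@code
#include <cstdio>

static int AdjustedSize(int oldsize, int aSize)   // mirrors the policy in ReAllocImpl
    {
    if (aSize <= oldsize)                         // shrink only below 75% of original
        return (aSize >= oldsize - (oldsize >> 2)) ? oldsize : aSize;
    if (aSize < oldsize + (oldsize >> 2))         // otherwise grow to >= 125%
        aSize = (oldsize + (oldsize >> 2) + 3) & ~3;
    return aSize;
    }

int main()
    {
    printf("%d %d %d\n", AdjustedSize(100, 90),
           AdjustedSize(100, 110), AdjustedSize(100, 200));
    // prints: 100 128 200
    }
@endcode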
-
-TAny* RHybridHeap::ReAlloc(TAny* aPtr, TInt aSize, TInt aMode )
-{
-
- aPtr = __GET_DEBUG_DATA_BFR(aPtr);
- __ALLOC_DEBUG_HEADER(aSize);
-
- TAny* retval = ReAllocImpl(aPtr, aSize, aMode);
-
- retval = __GET_USER_DATA_BFR(retval);
-
-#ifdef ENABLE_BTRACE
- if (iFlags & ETraceAllocs)
- {
- if ( retval )
- {
- TUint32 traceData[3];
- traceData[0] = AllocLen(retval);
- traceData[1] = aSize - __DEBUG_HDR_SIZE;
- traceData[2] = (TUint32)aPtr;
- BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc,(TUint32)this, (TUint32)retval, traceData, sizeof(traceData));
- }
- else
- BTraceContext12(BTrace::EHeap, BTrace::EHeapReAllocFail, (TUint32)this, (TUint32)aPtr, (TUint32)(aSize - __DEBUG_HDR_SIZE));
- }
-#endif
- return retval;
-}
-
-#ifndef __KERNEL_MODE__
-TInt RHybridHeap::Available(TInt& aBiggestBlock) const
-/**
-Gets the total free space currently available on the heap and the space
-available in the largest free block.
-
-Note that this function exists mainly for compatibility reasons. With a modern
-heap implementation such as the one in Symbian, it is rarely meaningful to ask
-for the amount of free memory on a heap or the size of its largest free block:
-available virtual memory is not the same as available physical memory, and
-several allocation strategies are used internally, which makes all memory
-usage figures "fuzzy" at best.
-
-In short, if you want to know whether there is enough memory available to
-allocate a block, call Alloc(): if it succeeds, there is enough memory!
-Querying functions like this one is of limited value with modern heap
-allocators.
-
-@param aBiggestBlock On return, contains the space available in the largest
- free block on the heap. Due to the internals of modern
- heap implementations, you can probably still allocate a
- block larger than this!
-
-@return The total free space currently available on the heap. Again, you can
- probably still allocate more than this!
-*/
-{
- struct HeapInfo info;
- Lock();
- TInt Biggest = GetInfo(&info);
- aBiggestBlock = __GET_AVAIL_BLOCK_SIZE(Biggest);
- Unlock();
- return __GET_AVAIL_BLOCK_SIZE(info.iFreeBytes);
-
-}
-
-TInt RHybridHeap::AllocSize(TInt& aTotalAllocSize) const
- /**
- Gets the number of cells allocated on this heap, and the total space
- allocated to them.
-
- @param aTotalAllocSize On return, contains the total space allocated
- to the cells.
-
- @return The number of cells allocated on this heap.
-*/
-{
- struct HeapInfo info;
- Lock();
- GetInfo(&info);
- aTotalAllocSize = info.iAllocBytes - __REMOVE_DBG_HDR(info.iAllocN);
- Unlock();
- return info.iAllocN;
-}
-
-#endif
-
-TInt RHybridHeap::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
-{
- return KErrNotSupported;
-}
-
-
-
-///////////////////////////////////////////////////////////////////////////////
-// imported from dla.cpp
-///////////////////////////////////////////////////////////////////////////////
-
-//#include <unistd.h>
-//#define DEBUG_REALLOC
-#ifdef DEBUG_REALLOC
-#include <e32debug.h>
-#endif
-
-inline void RHybridHeap::InitBins(mstate m)
-{
- /* Establish circular links for iSmallBins */
- bindex_t i;
- for (i = 0; i < NSMALLBINS; ++i) {
- sbinptr bin = SMALLBIN_AT(m,i);
- bin->iFd = bin->iBk = bin;
- }
- }
-/* ---------------------------- malloc support --------------------------- */
-
-/* allocate a large request from the best fitting chunk in a treebin */
-void* RHybridHeap::TmallocLarge(mstate m, size_t nb) {
- tchunkptr v = 0;
- size_t rsize = -nb; /* Unsigned negation */
- tchunkptr t;
- bindex_t idx;
- ComputeTreeIndex(nb, idx);
-
- if ((t = *TREEBIN_AT(m, idx)) != 0)
- {
- /* Traverse tree for this bin looking for node with size == nb */
- size_t sizebits = nb << LEFTSHIFT_FOR_TREE_INDEX(idx);
- tchunkptr rst = 0; /* The deepest untaken right subtree */
- for (;;)
- {
- tchunkptr rt;
- size_t trem = CHUNKSIZE(t) - nb;
- if (trem < rsize)
- {
- v = t;
- if ((rsize = trem) == 0)
- break;
- }
- rt = t->iChild[1];
- t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
- if (rt != 0 && rt != t)
- rst = rt;
- if (t == 0)
- {
- t = rst; /* set t to least subtree holding sizes > nb */
- break;
- }
- sizebits <<= 1;
- }
- }
- if (t == 0 && v == 0)
- { /* set t to root of next non-empty treebin */
- binmap_t leftbits = LEFT_BITS(IDX2BIT(idx)) & m->iTreeMap;
- if (leftbits != 0)
- {
- bindex_t i;
- binmap_t leastbit = LEAST_BIT(leftbits);
- ComputeBit2idx(leastbit, i);
- t = *TREEBIN_AT(m, i);
- }
- }
- while (t != 0)
- { /* Find smallest of tree or subtree */
- size_t trem = CHUNKSIZE(t) - nb;
- if (trem < rsize) {
- rsize = trem;
- v = t;
- }
- t = LEFTMOST_CHILD(t);
- }
- /* If iDv is a better fit, return 0 so malloc will use it */
- if (v != 0 && rsize < (size_t)(m->iDvSize - nb))
- {
- if (RTCHECK(OK_ADDRESS(m, v)))
- { /* split */
- mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
- HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
- if (RTCHECK(OK_NEXT(v, r)))
- {
- UnlinkLargeChunk(m, v);
- if (rsize < MIN_CHUNK_SIZE)
- SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
- else
- {
- SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
- SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
- InsertChunk(m, r, rsize);
- }
- return CHUNK2MEM(v);
- }
- }
- // CORRUPTION_ERROR_ACTION(m);
- }
- return 0;
- }
-
-/* allocate a small request from the best fitting chunk in a treebin */
-void* RHybridHeap::TmallocSmall(mstate m, size_t nb)
-{
- tchunkptr t, v;
- size_t rsize;
- bindex_t i;
- binmap_t leastbit = LEAST_BIT(m->iTreeMap);
- ComputeBit2idx(leastbit, i);
-
- v = t = *TREEBIN_AT(m, i);
- rsize = CHUNKSIZE(t) - nb;
-
- while ((t = LEFTMOST_CHILD(t)) != 0)
- {
- size_t trem = CHUNKSIZE(t) - nb;
- if (trem < rsize)
- {
- rsize = trem;
- v = t;
- }
- }
-
- if (RTCHECK(OK_ADDRESS(m, v)))
- {
- mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
- HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
- if (RTCHECK(OK_NEXT(v, r)))
- {
- UnlinkLargeChunk(m, v);
- if (rsize < MIN_CHUNK_SIZE)
- SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
- else
- {
- SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
- SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
- ReplaceDv(m, r, rsize);
- }
- return CHUNK2MEM(v);
- }
- }
-	//  CORRUPTION_ERROR_ACTION(m);
-	return 0;	// keep all control paths returning a value
-	}
-
-inline void RHybridHeap::InitTop(mstate m, mchunkptr p, size_t psize)
-{
- /* Ensure alignment */
- size_t offset = ALIGN_OFFSET(CHUNK2MEM(p));
- p = (mchunkptr)((TUint8*)p + offset);
- psize -= offset;
- m->iTop = p;
- m->iTopSize = psize;
- p->iHead = psize | PINUSE_BIT;
- /* set size of fake trailing chunk holding overhead space only once */
- mchunkptr chunkPlusOff = CHUNK_PLUS_OFFSET(p, psize);
- chunkPlusOff->iHead = TOP_FOOT_SIZE;
- m->iTrimCheck = KHeapShrinkHysRatio*(iGrowBy>>8);
-}
-
-
-/* Unlink the first chunk from a smallbin */
-inline void RHybridHeap::UnlinkFirstSmallChunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I)
-{
- mchunkptr F = P->iFd;
- HEAP_ASSERT(P != B);
- HEAP_ASSERT(P != F);
- HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
- if (B == F)
- CLEAR_SMALLMAP(M, I);
- else if (RTCHECK(OK_ADDRESS(M, F)))
- {
- B->iFd = F;
- F->iBk = B;
- }
- else
- {
- CORRUPTION_ERROR_ACTION(M);
- }
-}
-/* Link a free chunk into a smallbin */
-inline void RHybridHeap::InsertSmallChunk(mstate M,mchunkptr P, size_t S)
-{
- bindex_t I = SMALL_INDEX(S);
- mchunkptr B = SMALLBIN_AT(M, I);
- mchunkptr F = B;
- HEAP_ASSERT(S >= MIN_CHUNK_SIZE);
- if (!SMALLMAP_IS_MARKED(M, I))
- MARK_SMALLMAP(M, I);
- else if (RTCHECK(OK_ADDRESS(M, B->iFd)))
- F = B->iFd;
- else
- {
- CORRUPTION_ERROR_ACTION(M);
- }
- B->iFd = P;
- F->iBk = P;
- P->iFd = F;
- P->iBk = B;
-}
-
-
-inline void RHybridHeap::InsertChunk(mstate M,mchunkptr P,size_t S)
-{
- if (IS_SMALL(S))
- InsertSmallChunk(M, P, S);
- else
- {
- tchunkptr TP = (tchunkptr)(P); InsertLargeChunk(M, TP, S);
- }
-}
-
-inline void RHybridHeap::UnlinkLargeChunk(mstate M,tchunkptr X)
-{
- tchunkptr XP = X->iParent;
- tchunkptr R;
- if (X->iBk != X)
- {
- tchunkptr F = X->iFd;
- R = X->iBk;
- if (RTCHECK(OK_ADDRESS(M, F)))
- {
- F->iBk = R;
- R->iFd = F;
- }
- else
- {
- CORRUPTION_ERROR_ACTION(M);
- }
- }
- else
- {
- tchunkptr* RP;
- if (((R = *(RP = &(X->iChild[1]))) != 0) ||
- ((R = *(RP = &(X->iChild[0]))) != 0))
- {
- tchunkptr* CP;
- while ((*(CP = &(R->iChild[1])) != 0) ||
- (*(CP = &(R->iChild[0])) != 0))
- {
- R = *(RP = CP);
- }
- if (RTCHECK(OK_ADDRESS(M, RP)))
- *RP = 0;
- else
- {
- CORRUPTION_ERROR_ACTION(M);
- }
- }
- }
- if (XP != 0)
- {
- tbinptr* H = TREEBIN_AT(M, X->iIndex);
- if (X == *H)
- {
- if ((*H = R) == 0)
- CLEAR_TREEMAP(M, X->iIndex);
- }
- else if (RTCHECK(OK_ADDRESS(M, XP)))
- {
- if (XP->iChild[0] == X)
- XP->iChild[0] = R;
- else
- XP->iChild[1] = R;
- }
- else
- CORRUPTION_ERROR_ACTION(M);
- if (R != 0)
- {
- if (RTCHECK(OK_ADDRESS(M, R)))
- {
- tchunkptr C0, C1;
- R->iParent = XP;
- if ((C0 = X->iChild[0]) != 0)
- {
- if (RTCHECK(OK_ADDRESS(M, C0)))
- {
- R->iChild[0] = C0;
- C0->iParent = R;
- }
- else
- CORRUPTION_ERROR_ACTION(M);
- }
- if ((C1 = X->iChild[1]) != 0)
- {
- if (RTCHECK(OK_ADDRESS(M, C1)))
- {
- R->iChild[1] = C1;
- C1->iParent = R;
- }
- else
- CORRUPTION_ERROR_ACTION(M);
- }
- }
- else
- CORRUPTION_ERROR_ACTION(M);
- }
- }
-}
-
-/* Unlink a chunk from a smallbin */
-inline void RHybridHeap::UnlinkSmallChunk(mstate M, mchunkptr P,size_t S)
-{
- mchunkptr F = P->iFd;
- mchunkptr B = P->iBk;
- bindex_t I = SMALL_INDEX(S);
- HEAP_ASSERT(P != B);
- HEAP_ASSERT(P != F);
- HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
- if (F == B)
- CLEAR_SMALLMAP(M, I);
- else if (RTCHECK((F == SMALLBIN_AT(M,I) || OK_ADDRESS(M, F)) &&
- (B == SMALLBIN_AT(M,I) || OK_ADDRESS(M, B))))
- {
- F->iBk = B;
- B->iFd = F;
- }
- else
- {
- CORRUPTION_ERROR_ACTION(M);
- }
-}
-
-inline void RHybridHeap::UnlinkChunk(mstate M, mchunkptr P, size_t S)
-{
- if (IS_SMALL(S))
- UnlinkSmallChunk(M, P, S);
- else
- {
- tchunkptr TP = (tchunkptr)(P); UnlinkLargeChunk(M, TP);
- }
-}
-
-// For DL debug functions
-void RHybridHeap::DoComputeTreeIndex(size_t S, bindex_t& I)
-{
- ComputeTreeIndex(S, I);
-}
-
-inline void RHybridHeap::ComputeTreeIndex(size_t S, bindex_t& I)
-{
- size_t X = S >> TREEBIN_SHIFT;
- if (X == 0)
- I = 0;
- else if (X > 0xFFFF)
- I = NTREEBINS-1;
- else
- {
- unsigned int Y = (unsigned int)X;
- unsigned int N = ((Y - 0x100) >> 16) & 8;
- unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
- N += K;
- N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
- K = 14 - N + ((Y <<= K) >> 15);
- I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
- }
-}
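A worked example of the bit twiddling above: for S == 1536, X = 1536 >> 8 = 6, the highest set bit of X is bit 2 (so K == 2), and I = (2 << 1) + ((1536 >> (2 + 7)) & 1) = 5, which agrees with MINSIZE_FOR_TREE_INDEX(5) == 1536 from the macro section earlier. The same result using the GCC builtin purely as a readable stand-in for the branchless sequence:

@code
#include <cstdio>

int main()
    {
    unsigned S = 1536;
    unsigned X = S >> 8;                            // TREEBIN_SHIFT == 8, X == 6
    unsigned K = 31 - __builtin_clz(X);             // highest set bit of X: K == 2
    unsigned I = (K << 1) + ((S >> (K + 7)) & 1u);  // TREEBIN_SHIFT - 1 == 7
    printf("%u\n", I);                              // prints: 5
    }
@endcode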
-
-/* ------------------------- Operations on trees ------------------------- */
-
-/* Insert chunk into tree */
-inline void RHybridHeap::InsertLargeChunk(mstate M,tchunkptr X,size_t S)
-{
- tbinptr* H;
- bindex_t I;
- ComputeTreeIndex(S, I);
- H = TREEBIN_AT(M, I);
- X->iIndex = I;
- X->iChild[0] = X->iChild[1] = 0;
- if (!TREEMAP_IS_MARKED(M, I))
- {
- MARK_TREEMAP(M, I);
- *H = X;
- X->iParent = (tchunkptr)H;
- X->iFd = X->iBk = X;
- }
- else
- {
- tchunkptr T = *H;
- size_t K = S << LEFTSHIFT_FOR_TREE_INDEX(I);
- for (;;)
- {
- if (CHUNKSIZE(T) != S) {
- tchunkptr* C = &(T->iChild[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
- K <<= 1;
- if (*C != 0)
- T = *C;
- else if (RTCHECK(OK_ADDRESS(M, C)))
- {
- *C = X;
- X->iParent = T;
- X->iFd = X->iBk = X;
- break;
- }
- else
- {
- CORRUPTION_ERROR_ACTION(M);
- break;
- }
- }
- else
- {
- tchunkptr F = T->iFd;
- if (RTCHECK(OK_ADDRESS(M, T) && OK_ADDRESS(M, F)))
- {
- T->iFd = F->iBk = X;
- X->iFd = F;
- X->iBk = T;
- X->iParent = 0;
- break;
- }
- else
- {
- CORRUPTION_ERROR_ACTION(M);
- break;
- }
- }
- }
- }
-}
-
-/*
-Unlink steps:
-
-1. If x is a chained node, unlink it from its same-sized iFd/iBk links
-and choose its iBk node as its replacement.
-2. If x was the last node of its size, but not a leaf node, it must
-be replaced with a leaf node (not merely one with an open left or
-right), to make sure that lefts and rights of descendants
-correspond properly to bit masks. We use the rightmost descendant
-of x. We could use any other leaf, but this is easy to locate and
-tends to counteract removal of leftmosts elsewhere, and so keeps
-paths shorter than minimally guaranteed. This doesn't loop much
-because on average a node in a tree is near the bottom.
-3. If x is the base of a chain (i.e., has iParent links) relink
-x's iParent and children to x's replacement (or null if none).
-*/
-
-/* Replace iDv node, binning the old one */
-/* Used only when iDvSize known to be small */
-inline void RHybridHeap::ReplaceDv(mstate M, mchunkptr P, size_t S)
-{
- size_t DVS = M->iDvSize;
- if (DVS != 0)
- {
- mchunkptr DV = M->iDv;
- HEAP_ASSERT(IS_SMALL(DVS));
- InsertSmallChunk(M, DV, DVS);
- }
- M->iDvSize = S;
- M->iDv = P;
-}
-
-
-inline void RHybridHeap::ComputeBit2idx(binmap_t X,bindex_t& I)
-{
- unsigned int Y = X - 1;
- unsigned int K = Y >> (16-4) & 16;
- unsigned int N = K; Y >>= K;
- N += K = Y >> (8-3) & 8; Y >>= K;
- N += K = Y >> (4-2) & 4; Y >>= K;
- N += K = Y >> (2-1) & 2; Y >>= K;
- N += K = Y >> (1-0) & 1; Y >>= K;
- I = (bindex_t)(N + Y);
-}
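ComputeBit2idx is the inverse of IDX2BIT for one-hot inputs, i.e. a branchless log2 of a single set bit. A round-trip check against a plain C++ mirror of the routine:

@code
#include <cstdio>

static unsigned Bit2Idx(unsigned X)                // mirrors ComputeBit2idx
    {
    unsigned Y = X - 1, N = 0, K;
    N += K = (Y >> 12) & 16; Y >>= K;
    N += K = (Y >> 5) & 8;   Y >>= K;
    N += K = (Y >> 2) & 4;   Y >>= K;
    N += K = (Y >> 1) & 2;   Y >>= K;
    N += K = (Y >> 1) & 1;   Y >>= K;
    return N + Y;
    }

int main()
    {
    for (unsigned i = 0; i < 32; ++i)              // IDX2BIT, then back again
        if (Bit2Idx(1u << i) != i)
            { printf("mismatch at %u\n", i); return 1; }
    printf("ok\n");                                // prints: ok
    }
@endcode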
-
-
-
-int RHybridHeap::SysTrim(mstate m, size_t pad)
-{
- size_t extra = 0;
-
- if ( IS_INITIALIZED(m) )
- {
- pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
-
- if (m->iTopSize > pad)
- {
- extra = Floor(m->iTopSize - pad, iPageSize);
- if ( (m->iSeg.iSize - extra) < (iMinLength - sizeof(*this)) )
- {
- if ( m->iSeg.iSize > (iMinLength - sizeof(*this)) )
- extra = Floor(m->iSeg.iSize - (iMinLength - sizeof(*this)), iPageSize); /* do not shrink heap below min length */
- else extra = 0;
- }
-
- if ( extra )
- {
- Unmap(m->iSeg.iBase + m->iSeg.iSize - extra, extra);
-
- m->iSeg.iSize -= extra;
- InitTop(m, m->iTop, m->iTopSize - extra);
- CHECK_TOP_CHUNK(m, m->iTop);
- }
- }
-
- }
-
- return extra;
-}
-
-/* Get memory from system using MORECORE */
-
-void* RHybridHeap::SysAlloc(mstate m, size_t nb)
-{
- HEAP_ASSERT(m->iTop);
- /* Subtract out existing available iTop space from MORECORE request. */
-// size_t asize = _ALIGN_UP(nb - m->iTopSize + TOP_FOOT_SIZE + SIZE_T_ONE, iGrowBy);
- TInt asize = _ALIGN_UP(nb - m->iTopSize + SYS_ALLOC_PADDING, iGrowBy); // From DLA version 2.8.4
-
- char* br = (char*)Map(m->iSeg.iBase+m->iSeg.iSize, asize);
- if (!br)
- return 0;
- HEAP_ASSERT(br == (char*)m->iSeg.iBase+m->iSeg.iSize);
-
- /* Merge with an existing segment */
- m->iSeg.iSize += asize;
- InitTop(m, m->iTop, m->iTopSize + asize);
-
- if (nb < m->iTopSize)
- { /* Allocate from new or extended iTop space */
- size_t rsize = m->iTopSize -= nb;
- mchunkptr p = m->iTop;
- mchunkptr r = m->iTop = CHUNK_PLUS_OFFSET(p, nb);
- r->iHead = rsize | PINUSE_BIT;
- SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, p, nb);
- CHECK_TOP_CHUNK(m, m->iTop);
- CHECK_MALLOCED_CHUNK(m, CHUNK2MEM(p), nb);
- return CHUNK2MEM(p);
- }
-
- return 0;
-}
-
-
-void RHybridHeap::InitDlMalloc(size_t capacity, int /*locked*/)
-{
- memset(GM,0,sizeof(malloc_state));
-	// The maximum amount that can be allocated is:
-	// 2^(8*sizeof(size_t)) - sizeof(malloc_state) - TOP_FOOT_SIZE - page size (all suitably padded)
-	// If the capacity exceeds this, no allocation will be done.
- GM->iSeg.iBase = iBase;
- GM->iSeg.iSize = capacity;
- InitBins(GM);
- InitTop(GM, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
-}
-
-void* RHybridHeap::DlMalloc(size_t bytes)
-{
- /*
- Basic algorithm:
- If a small request (< 256 bytes minus per-chunk overhead):
- 1. If one exists, use a remainderless chunk in associated smallbin.
- (Remainderless means that there are too few excess bytes to
- represent as a chunk.)
- 2. If it is big enough, use the iDv chunk, which is normally the
- chunk adjacent to the one used for the most recent small request.
- 3. If one exists, split the smallest available chunk in a bin,
- saving remainder in iDv.
- 4. If it is big enough, use the iTop chunk.
- 5. If available, get memory from system and use it
- Otherwise, for a large request:
- 1. Find the smallest available binned chunk that fits, and use it
- if it is better fitting than iDv chunk, splitting if necessary.
- 2. If better fitting than any binned chunk, use the iDv chunk.
- 3. If it is big enough, use the iTop chunk.
- 4. If request size >= mmap threshold, try to directly mmap this chunk.
- 5. If available, get memory from system and use it
-*/
- void* mem;
- size_t nb;
- if (bytes <= MAX_SMALL_REQUEST)
- {
- bindex_t idx;
- binmap_t smallbits;
- nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(bytes);
- idx = SMALL_INDEX(nb);
- smallbits = GM->iSmallMap >> idx;
-
- if ((smallbits & 0x3U) != 0)
- { /* Remainderless fit to a smallbin. */
- mchunkptr b, p;
- idx += ~smallbits & 1; /* Uses next bin if idx empty */
- b = SMALLBIN_AT(GM, idx);
- p = b->iFd;
- HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(idx));
- UnlinkFirstSmallChunk(GM, b, p, idx);
- SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(idx));
- mem = CHUNK2MEM(p);
- CHECK_MALLOCED_CHUNK(GM, mem, nb);
- return mem;
- }
-
- else if (nb > GM->iDvSize)
- {
- if (smallbits != 0)
- { /* Use chunk in next nonempty smallbin */
- mchunkptr b, p, r;
- size_t rsize;
- bindex_t i;
- binmap_t leftbits = (smallbits << idx) & LEFT_BITS(IDX2BIT(idx));
- binmap_t leastbit = LEAST_BIT(leftbits);
- ComputeBit2idx(leastbit, i);
- b = SMALLBIN_AT(GM, i);
- p = b->iFd;
- HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(i));
- UnlinkFirstSmallChunk(GM, b, p, i);
- rsize = SMALL_INDEX2SIZE(i) - nb;
- /* Fit here cannot be remainderless if 4byte sizes */
- if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
- SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(i));
- else
- {
- SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
- r = CHUNK_PLUS_OFFSET(p, nb);
- SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
- ReplaceDv(GM, r, rsize);
- }
- mem = CHUNK2MEM(p);
- CHECK_MALLOCED_CHUNK(GM, mem, nb);
- return mem;
- }
-
- else if (GM->iTreeMap != 0 && (mem = TmallocSmall(GM, nb)) != 0)
- {
- CHECK_MALLOCED_CHUNK(GM, mem, nb);
- return mem;
- }
- }
- }
- else if (bytes >= MAX_REQUEST)
- nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
- else
- {
- nb = PAD_REQUEST(bytes);
- if (GM->iTreeMap != 0 && (mem = TmallocLarge(GM, nb)) != 0)
- {
- CHECK_MALLOCED_CHUNK(GM, mem, nb);
- return mem;
- }
- }
-
- if (nb <= GM->iDvSize)
- {
- size_t rsize = GM->iDvSize - nb;
- mchunkptr p = GM->iDv;
- if (rsize >= MIN_CHUNK_SIZE)
- { /* split iDv */
- mchunkptr r = GM->iDv = CHUNK_PLUS_OFFSET(p, nb);
- GM->iDvSize = rsize;
- SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
- SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
- }
- else
- { /* exhaust iDv */
- size_t dvs = GM->iDvSize;
- GM->iDvSize = 0;
- GM->iDv = 0;
- SET_INUSE_AND_PINUSE(GM, p, dvs);
- }
- mem = CHUNK2MEM(p);
- CHECK_MALLOCED_CHUNK(GM, mem, nb);
- return mem;
- }
-
- else if (nb < GM->iTopSize)
- { /* Split iTop */
- size_t rsize = GM->iTopSize -= nb;
- mchunkptr p = GM->iTop;
- mchunkptr r = GM->iTop = CHUNK_PLUS_OFFSET(p, nb);
- r->iHead = rsize | PINUSE_BIT;
- SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
- mem = CHUNK2MEM(p);
- CHECK_TOP_CHUNK(GM, GM->iTop);
- CHECK_MALLOCED_CHUNK(GM, mem, nb);
- return mem;
- }
-
- return SysAlloc(GM, nb);
-}
-
-
-void RHybridHeap::DlFree(void* mem)
-{
- /*
- Consolidate freed chunks with preceding or succeeding bordering
- free chunks, if they exist, and then place in a bin. Intermixed
- with special cases for iTop, iDv, mmapped chunks, and usage errors.
-*/
- mchunkptr p = MEM2CHUNK(mem);
- CHECK_INUSE_CHUNK(GM, p);
- if (RTCHECK(OK_ADDRESS(GM, p) && OK_CINUSE(p)))
- {
- size_t psize = CHUNKSIZE(p);
- mchunkptr next = CHUNK_PLUS_OFFSET(p, psize);
- if (!PINUSE(p))
- {
- size_t prevsize = p->iPrevFoot;
- mchunkptr prev = CHUNK_MINUS_OFFSET(p, prevsize);
- psize += prevsize;
- p = prev;
- if (RTCHECK(OK_ADDRESS(GM, prev)))
- { /* consolidate backward */
- if (p != GM->iDv)
- {
- UnlinkChunk(GM, p, prevsize);
- }
- else if ((next->iHead & INUSE_BITS) == INUSE_BITS)
- {
- GM->iDvSize = psize;
- SET_FREE_WITH_PINUSE(p, psize, next);
- return;
- }
- }
- else
- {
- USAGE_ERROR_ACTION(GM, p);
- return;
- }
- }
-
- if (RTCHECK(OK_NEXT(p, next) && OK_PINUSE(next)))
- {
- if (!CINUSE(next))
- { /* consolidate forward */
- if (next == GM->iTop)
- {
- size_t tsize = GM->iTopSize += psize;
- GM->iTop = p;
- p->iHead = tsize | PINUSE_BIT;
- if (p == GM->iDv)
- {
- GM->iDv = 0;
- GM->iDvSize = 0;
- }
- if ( !IS_FIXED_HEAP && SHOULD_TRIM(GM, tsize) )
- SysTrim(GM, 0);
- return;
- }
- else if (next == GM->iDv)
- {
- size_t dsize = GM->iDvSize += psize;
- GM->iDv = p;
- SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, dsize);
- return;
- }
- else
- {
- size_t nsize = CHUNKSIZE(next);
- psize += nsize;
- UnlinkChunk(GM, next, nsize);
- SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, psize);
- if (p == GM->iDv)
- {
- GM->iDvSize = psize;
- return;
- }
- }
- }
- else
- SET_FREE_WITH_PINUSE(p, psize, next);
- InsertChunk(GM, p, psize);
- CHECK_FREE_CHUNK(GM, p);
- return;
- }
- }
-}
-
-
-void* RHybridHeap::DlRealloc(void* oldmem, size_t bytes, TInt mode)
-{
- mchunkptr oldp = MEM2CHUNK(oldmem);
- size_t oldsize = CHUNKSIZE(oldp);
- mchunkptr next = CHUNK_PLUS_OFFSET(oldp, oldsize);
- mchunkptr newp = 0;
- void* extra = 0;
-
- /* Try to either shrink or extend into iTop. Else malloc-copy-free */
-
- if (RTCHECK(OK_ADDRESS(GM, oldp) && OK_CINUSE(oldp) &&
- OK_NEXT(oldp, next) && OK_PINUSE(next)))
- {
- size_t nb = REQUEST2SIZE(bytes);
- if (oldsize >= nb) { /* already big enough */
- size_t rsize = oldsize - nb;
- newp = oldp;
- if (rsize >= MIN_CHUNK_SIZE)
- {
- mchunkptr remainder = CHUNK_PLUS_OFFSET(newp, nb);
- SET_INUSE(GM, newp, nb);
-// SET_INUSE(GM, remainder, rsize);
- SET_INUSE_AND_PINUSE(GM, remainder, rsize); // corrected in original DLA version V2.8.4
- extra = CHUNK2MEM(remainder);
- }
- }
- else if (next == GM->iTop && oldsize + GM->iTopSize > nb)
- {
- /* Expand into iTop */
- size_t newsize = oldsize + GM->iTopSize;
- size_t newtopsize = newsize - nb;
- mchunkptr newtop = CHUNK_PLUS_OFFSET(oldp, nb);
- SET_INUSE(GM, oldp, nb);
- newtop->iHead = newtopsize |PINUSE_BIT;
- GM->iTop = newtop;
- GM->iTopSize = newtopsize;
- newp = oldp;
- }
- }
- else
- {
- USAGE_ERROR_ACTION(GM, oldmem);
- }
-
- if (newp != 0)
- {
- if (extra != 0)
- {
- DlFree(extra);
- }
- CHECK_INUSE_CHUNK(GM, newp);
- return CHUNK2MEM(newp);
- }
- else
- {
- if ( mode & ENeverMove )
- return 0; // cannot move
- void* newmem = DlMalloc(bytes);
- if (newmem != 0)
- {
- size_t oc = oldsize - OVERHEAD_FOR(oldp);
- memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
- DlFree(oldmem);
- }
- return newmem;
- }
- // return 0;
-}
-
-size_t RHybridHeap::DlInfo(struct HeapInfo* i, SWalkInfo* wi) const
-{
- TInt max = ((GM->iTopSize-1) & ~CHUNK_ALIGN_MASK) - CHUNK_OVERHEAD;
- if ( max < 0 )
- max = 0;
- else ++i->iFreeN; // iTop always free
- i->iFreeBytes += max;
-
- Walk(wi, GM->iTop, max, EGoodFreeCell, EDougLeaAllocator); // Introduce DL iTop buffer to the walk function
-
- for (mchunkptr q = ALIGN_AS_CHUNK(GM->iSeg.iBase); q != GM->iTop; q = NEXT_CHUNK(q))
- {
- TInt sz = CHUNKSIZE(q);
- if (!CINUSE(q))
- {
- if ( sz > max )
- max = sz;
- i->iFreeBytes += sz;
- ++i->iFreeN;
- Walk(wi, CHUNK2MEM(q), sz, EGoodFreeCell, EDougLeaAllocator); // Introduce DL free buffer to the walk function
- }
- else
- {
- i->iAllocBytes += sz - CHUNK_OVERHEAD;
- ++i->iAllocN;
- Walk(wi, CHUNK2MEM(q), (sz- CHUNK_OVERHEAD), EGoodAllocatedCell, EDougLeaAllocator); // Introduce DL allocated buffer to the walk function
- }
- }
- return max; // return largest available chunk size
-}
-
-//
-// get statistics about the state of the allocator
-//
-TInt RHybridHeap::GetInfo(struct HeapInfo* i, SWalkInfo* wi) const
-{
- memset(i,0,sizeof(HeapInfo));
- i->iFootprint = iChunkSize;
- i->iMaxSize = iMaxLength;
-#ifndef __KERNEL_MODE__
- PagedInfo(i, wi);
- SlabInfo(i, wi);
-#endif
- return DlInfo(i,wi);
-}
-
-//
-// Methods to commit/decommit memory pages from chunk
-//
-
-
-void* RHybridHeap::Map(void* p, TInt sz)
-//
-// allocate pages in the chunk
-// if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
-// otherwise commit the pages specified
-//
-{
- HEAP_ASSERT(sz > 0);
-
- if ( iChunkSize + sz > iMaxLength)
- return 0;
-
-#ifdef __KERNEL_MODE__
-
- TInt r = ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset + sz);
- if (r < 0)
- return 0;
-
- iChunkSize += sz;
-
-#else
-
- RChunk chunk;
- chunk.SetHandle(iChunkHandle);
- if ( p )
- {
- TInt r;
- if ( iUseAdjust )
- r = chunk.Adjust(iChunkSize + sz);
- else
- {
- HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
- HEAP_ASSERT(p == Floor(p, iPageSize));
- r = chunk.Commit(iOffset + PtrDiff(p, this),sz);
- }
- if (r < 0)
- return 0;
- }
- else
- {
- TInt r = chunk.Allocate(sz);
- if (r < 0)
- return 0;
- if (r > iOffset)
- {
- // can't allow page allocations in DL zone
- chunk.Decommit(r, sz);
- return 0;
- }
- p = Offset(this, r - iOffset);
- }
- iChunkSize += sz;
-
- if (iChunkSize >= iSlabInitThreshold)
- { // set up slab system now that heap is large enough
- SlabConfig(iSlabConfigBits);
- iSlabInitThreshold = KMaxTInt32;
- }
-
-#endif // __KERNEL_MODE__
-
-#ifdef ENABLE_BTRACE
- if(iChunkSize > iHighWaterMark)
- {
- iHighWaterMark = Ceiling(iChunkSize,16*iPageSize);
- TUint32 traceData[6];
- traceData[0] = iChunkHandle;
- traceData[1] = iMinLength;
- traceData[2] = iMaxLength;
- traceData[3] = sz;
- traceData[4] = iChunkSize;
- traceData[5] = iHighWaterMark;
- BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 33, traceData, sizeof(traceData));
- }
-#endif
-
- return p;
-}
-
-void RHybridHeap::Unmap(void* p, TInt sz)
-{
- HEAP_ASSERT(sz > 0);
-
-#ifdef __KERNEL_MODE__
-
- (void)p;
- HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
-#if defined(_DEBUG)
- TInt r =
-#endif
- ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset - sz);
- HEAP_ASSERT(r >= 0);
-
-#else
-
- RChunk chunk;
- chunk.SetHandle(iChunkHandle);
- if ( iUseAdjust )
- {
- HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
-#if defined(_DEBUG)
- TInt r =
-#endif
- chunk.Adjust(iChunkSize - sz);
- HEAP_ASSERT(r >= 0);
- }
- else
- {
- HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
- HEAP_ASSERT(p == Floor(p, iPageSize));
-#if defined(_DEBUG)
- TInt r =
-#endif
- chunk.Decommit(PtrDiff(p, Offset(this,-iOffset)), sz);
- HEAP_ASSERT(r >= 0);
- }
-#endif // __KERNEL_MODE__
-
- iChunkSize -= sz;
-}
-
-
-#ifndef __KERNEL_MODE__
-//
-// Slab allocator code
-//
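-
-// Editor's sketch of the slab header bit layout, reconstructed from the
-// manipulation code in SlabAllocate()/SlabFree() below (informal, not an
-// authoritative specification):
-//   bits  0..7   offset (in words) of the first free cell, 0 if none
-//   bits  8..11  pagemap: free/used state of the four slabs in the page,
-//                maintained in the first slab's header only
-//   bits 12..17  cell size in bytes
-//   bits 18..27  "usedm4": bytes in use minus 4
-//   bit  31      FLOATING_BIT: slab is full and unlinked from its iPartial
-//                tree (inferred from the int(h) >= 0 sign tests)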
-
-//inline slab* slab::SlabFor(void* p)
-slab* slab::SlabFor( const void* p)
-{
- return (slab*)(Floor(p, SLABSIZE));
-}
-
-//
-// Remove slab s from its tree/heap (not necessarily the root), preserving the address order
-// invariant of the heap
-//
-void RHybridHeap::TreeRemove(slab* s)
-{
- slab** r = s->iParent;
- slab* c1 = s->iChild1;
- slab* c2 = s->iChild2;
- for (;;)
- {
- if (!c2)
- {
- *r = c1;
- if (c1)
- c1->iParent = r;
- return;
- }
- if (!c1)
- {
- *r = c2;
- c2->iParent = r;
- return;
- }
- if (c1 > c2)
- {
- slab* c3 = c1;
- c1 = c2;
- c2 = c3;
- }
- slab* newc2 = c1->iChild2;
- *r = c1;
- c1->iParent = r;
- c1->iChild2 = c2;
- c2->iParent = &c1->iChild2;
- s = c1;
- c1 = s->iChild1;
- c2 = newc2;
- r = &s->iChild1;
- }
-}
-//
-// Insert slab s into the tree/heap rooted at r, preserving the address ordering
-// invariant of the heap
-//
-void RHybridHeap::TreeInsert(slab* s,slab** r)
-{
- slab* n = *r;
- for (;;)
- {
- if (!n)
- { // tree empty
- *r = s;
- s->iParent = r;
- s->iChild1 = s->iChild2 = 0;
- break;
- }
- if (s < n)
- { // insert between iParent and n
- *r = s;
- s->iParent = r;
- s->iChild1 = n;
- s->iChild2 = 0;
- n->iParent = &s->iChild1;
- break;
- }
- slab* c1 = n->iChild1;
- slab* c2 = n->iChild2;
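-      // The -1 bias below makes a NULL child wrap to the largest pointer
-      // value, so the comparison steers the descent towards an empty slot
-      // where the insertion can terminate (editor's note).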
- if ((c1 - 1) > (c2 - 1))
- {
- r = &n->iChild1;
- n = c1;
- }
- else
- {
- r = &n->iChild2;
- n = c2;
- }
- }
-}
-
-void* RHybridHeap::AllocNewSlab(slabset& allocator)
-//
-// Acquire and initialise a new slab, returning a cell from the slab
-// The strategy is:
-// 1. Use the lowest address free slab, if available. This is done by using the lowest slab
-// in the page at the root of the iPartialPage heap (which is address ordered). If the page
-// is now fully used, remove it from the iPartialPage heap.
-// 2. Allocate a new page for iSlabs if no empty iSlabs are available
-//
-{
- page* p = page::PageFor(iPartialPage);
- if (!p)
- return AllocNewPage(allocator);
-
- unsigned h = p->iSlabs[0].iHeader;
- unsigned pagemap = SlabHeaderPagemap(h);
- HEAP_ASSERT(&p->iSlabs[HIBIT(pagemap)] == iPartialPage);
-
- unsigned slabix = LOWBIT(pagemap);
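-   // Clear this slab's free bit in the pagemap (bits 8..11 of the first
-   // slab's header) now that the slab is being put into use.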
- p->iSlabs[0].iHeader = h &~ (0x100<<slabix);
- if (!(pagemap &~ (1<<slabix)))
- {
- TreeRemove(iPartialPage); // last free slab in page
- }
-
- return InitNewSlab(allocator, &p->iSlabs[slabix]);
-}
-
-/** Definition of this function is not present in the prototype code ***/
-#if 0
-void RHybridHeap::partial_insert(slab* s)
-{
- // slab has had first cell freed and needs to be linked back into iPartial tree
- slabset& ss = iSlabAlloc[iSizeMap[s->clz]];
-
- HEAP_ASSERT(s->used == slabfull);
- s->used = ss.fulluse - s->clz; // full-1 loading
- TreeInsert(s,&ss.iPartial);
- CHECKTREE(&ss.iPartial);
-}
-/** Definition of this function is not present in the prototype code ***/
-#endif
-
-void* RHybridHeap::AllocNewPage(slabset& allocator)
-//
-// Acquire and initialise a new page, returning a cell from a new slab
-// The iPartialPage tree is empty (otherwise we'd have used a slab from there)
-// The iPartialPage link is put in the highest addressed slab in the page, and the
-// lowest addressed slab is used to fulfill the allocation request
-//
-{
- page* p = iSparePage;
- if (p)
- iSparePage = 0;
- else
- {
- p = static_cast<page*>(Map(0, iPageSize));
- if (!p)
- return 0;
- }
- HEAP_ASSERT(p == Floor(p, iPageSize));
- // Store page allocated for slab into paged_bitmap (for RHybridHeap::Reset())
- if (!PagedSetSize(p, iPageSize))
- {
- Unmap(p, iPageSize);
- return 0;
- }
- p->iSlabs[0].iHeader = ((1<<3) + (1<<2) + (1<<1))<<8; // set pagemap
- p->iSlabs[3].iParent = &iPartialPage;
- p->iSlabs[3].iChild1 = p->iSlabs[3].iChild2 = 0;
- iPartialPage = &p->iSlabs[3];
- return InitNewSlab(allocator,&p->iSlabs[0]);
-}
-
-void RHybridHeap::FreePage(page* p)
-//
-// Release an unused page to the OS
-// A single page is cached for reuse to reduce thrashing
-// the OS allocator.
-//
-{
- HEAP_ASSERT(Ceiling(p, iPageSize) == p);
- if (!iSparePage)
- {
- iSparePage = p;
- return;
- }
-
- // unmapped slab page must be cleared from paged_bitmap, too
- PagedZapSize(p, iPageSize); // clear page map
-
- Unmap(p, iPageSize);
-}
-
-void RHybridHeap::FreeSlab(slab* s)
-//
-// Release an empty slab to the slab manager
-// The strategy is:
-// 1. The page containing the slab is checked to see the state of the other iSlabs in the page by
-// inspecting the pagemap field in the iHeader of the first slab in the page.
-// 2. The pagemap is updated to indicate the new unused slab
-// 3. If this is the only unused slab in the page then the slab iHeader is used to add the page to
-// the iPartialPage tree/heap
-// 4. If all the iSlabs in the page are now unused the page is release back to the OS
-// 5. If this slab has a higher address than the one currently used to track this page in
-// the iPartialPage heap, the linkage is moved to the new unused slab
-//
-{
- TreeRemove(s);
- CHECKTREE(s->iParent);
- HEAP_ASSERT(SlabHeaderUsedm4(s->iHeader) == SlabHeaderSize(s->iHeader)-4);
-
- page* p = page::PageFor(s);
- unsigned h = p->iSlabs[0].iHeader;
- int slabix = s - &p->iSlabs[0];
- unsigned pagemap = SlabHeaderPagemap(h);
- p->iSlabs[0].iHeader = h | (0x100<<slabix);
- if (pagemap == 0)
- { // page was full before, use this slab as link in empty heap
- TreeInsert(s, &iPartialPage);
- }
- else
- { // Find the current empty-link slab
- slab* sl = &p->iSlabs[HIBIT(pagemap)];
- pagemap ^= (1<<slabix);
- if (pagemap == 0xf)
- { // page is now empty so recycle page to os
- TreeRemove(sl);
- FreePage(p);
- return;
- }
- // ensure the free list link is in highest address slab in page
- if (s > sl)
- { // replace current link with new one. Address-order tree so position stays the same
- slab** r = sl->iParent;
- slab* c1 = sl->iChild1;
- slab* c2 = sl->iChild2;
- s->iParent = r;
- s->iChild1 = c1;
- s->iChild2 = c2;
- *r = s;
- if (c1)
- c1->iParent = &s->iChild1;
- if (c2)
- c2->iParent = &s->iChild2;
- }
- CHECK(if (s < sl) s=sl);
- }
- HEAP_ASSERT(SlabHeaderPagemap(p->iSlabs[0].iHeader) != 0);
- HEAP_ASSERT(HIBIT(SlabHeaderPagemap(p->iSlabs[0].iHeader)) == unsigned(s - &p->iSlabs[0]));
-}
-
-
-void RHybridHeap::SlabInit()
-{
- iSlabThreshold=0;
- iPartialPage = 0;
- iFullSlab = 0;
- iSparePage = 0;
- memset(&iSizeMap[0],0xff,sizeof(iSizeMap));
- memset(&iSlabAlloc[0],0,sizeof(iSlabAlloc));
-}
-
-void RHybridHeap::SlabConfig(unsigned slabbitmap)
-{
- HEAP_ASSERT((slabbitmap & ~EOkBits) == 0);
- HEAP_ASSERT(MAXSLABSIZE <= 60);
-
- unsigned int ix = 0xff;
- unsigned int bit = 1<<((MAXSLABSIZE>>2)-1);
- for (int sz = MAXSLABSIZE; sz >= 0; sz -= 4, bit >>= 1)
- {
- if (slabbitmap & bit)
- {
- if (ix == 0xff)
- iSlabThreshold=sz+1;
- ix = (sz>>2)-1;
- }
- iSizeMap[sz>>2] = (TUint8) ix;
- }
-}
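-
-// Editor's illustration (not part of the original source): with a slabbitmap
-// that enables only the 8- and 16-byte cell sizes (bits 1 and 3), the loop
-// above leaves iSizeMap so that requests of up to 8 bytes use the 8-byte
-// slabset, 9-16 bytes use the 16-byte slabset, and iSlabThreshold becomes 17,
-// sending any larger request to the Doug Lea allocator instead.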
-
-
-void* RHybridHeap::SlabAllocate(slabset& ss)
-//
-// Allocate a cell from the given slabset
-// Strategy:
-// 1. Take the partially full slab at the iTop of the heap (lowest address).
-// 2. If there is no such slab, allocate from a new slab
-// 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab
-// 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
-// the slab, updating the slab
-// 5. Otherwise, release the slab from the iPartial tree/heap, marking it as 'floating' and go back to
-// step 1
-//
-{
- for (;;)
- {
- slab *s = ss.iPartial;
- if (!s)
- break;
- unsigned h = s->iHeader;
- unsigned free = h & 0xff; // extract free cell positioning
- if (free)
- {
- HEAP_ASSERT(((free<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
- void* p = Offset(s,free<<2);
- free = *(unsigned char*)p; // get next pos in free list
- h += (h&0x3C000)<<6; // update usedm4
- h &= ~0xff;
- h |= free; // update freelist
- s->iHeader = h;
- HEAP_ASSERT(SlabHeaderFree(h) == 0 || ((SlabHeaderFree(h)<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
- HEAP_ASSERT(SlabHeaderUsedm4(h) <= 0x3F8u);
- HEAP_ASSERT((SlabHeaderUsedm4(h)+4)%SlabHeaderSize(h) == 0);
- return p;
- }
- unsigned h2 = h + ((h&0x3C000)<<6);
-// if (h2 < 0xfc00000)
- if (h2 < MAXUSEDM4BITS)
- {
- HEAP_ASSERT((SlabHeaderUsedm4(h2)+4)%SlabHeaderSize(h2) == 0);
- s->iHeader = h2;
- return Offset(s,(h>>18) + sizeof(unsigned) + sizeof(slabhdr));
- }
- h |= FLOATING_BIT; // mark the slab as full-floating
- s->iHeader = h;
- TreeRemove(s);
- slab* c = iFullSlab; // add to full list
- iFullSlab = s;
- s->iParent = &iFullSlab;
- s->iChild1 = c;
- s->iChild2 = 0;
- if (c)
- c->iParent = &s->iChild1;
-
- CHECKTREE(&ss.iPartial);
- // go back and try the next slab...
- }
- // no iPartial iSlabs found, so allocate from a new slab
- return AllocNewSlab(ss);
-}
-
-void RHybridHeap::SlabFree(void* p)
-//
-// Free a cell from the slab allocator
-// Strategy:
-// 1. Find the containing slab (round down to nearest 1KB boundary)
-// 2. Push the cell into the slab's freelist, and update the slab usage count
-// 3. If this is the last allocated cell, free the slab to the main slab manager
-// 4. If the slab was full-floating then insert the slab in its respective iPartial tree
-//
-{
- HEAP_ASSERT(LowBits(p,3)==0);
- slab* s = slab::SlabFor(p);
- CHECKSLAB(s,ESlabAllocator,p);
- CHECKSLABBFR(s,p);
-
- unsigned pos = LowBits(p, SLABSIZE);
- unsigned h = s->iHeader;
- HEAP_ASSERT(SlabHeaderUsedm4(h) != 0x3fC); // slab is empty already
- HEAP_ASSERT((pos-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
- *(unsigned char*)p = (unsigned char)h;
- h &= ~0xFF;
- h |= (pos>>2);
- unsigned size = h & 0x3C000;
- if (int(h) >= 0)
- {
- h -= size<<6;
- if (int(h)>=0)
- {
- s->iHeader = h;
- return;
- }
- FreeSlab(s);
- return;
- }
- h -= size<<6;
- h &= ~FLOATING_BIT;
- s->iHeader = h;
- slab** full = s->iParent; // remove from full list
- slab* c = s->iChild1;
- *full = c;
- if (c)
- c->iParent = full;
-
- slabset& ss = iSlabAlloc[iSizeMap[size>>14]];
- TreeInsert(s,&ss.iPartial);
- CHECKTREE(&ss.iPartial);
-}
-
-void* RHybridHeap::InitNewSlab(slabset& allocator, slab* s)
-//
-// initialise an empty slab for this allocator and return the first cell
-// pre-condition: the slabset has no iPartial iSlabs for allocation
-//
-{
- HEAP_ASSERT(allocator.iPartial==0);
- TInt size = 4 + ((&allocator-&iSlabAlloc[0])<<2); // infer size from slab allocator address
- unsigned h = s->iHeader & 0xF00; // preserve pagemap only
- h |= (size<<12); // set size
- h |= (size-4)<<18; // set usedminus4 to one object minus 4
- s->iHeader = h;
- allocator.iPartial = s;
- s->iParent = &allocator.iPartial;
- s->iChild1 = s->iChild2 = 0;
- return Offset(s,sizeof(slabhdr));
-}
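-
-// Editor's note: InitNewSlab() infers the cell size from the slabset's
-// position in iSlabAlloc; e.g. for &iSlabAlloc[1] the size is 4 + (1 << 2)
-// = 8 bytes, and usedminus4 starts at size - 4 because the first cell is
-// returned to the caller as already allocated.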
-
-const unsigned char slab_bitcount[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};
-
-const unsigned char slab_ext_frag[16] =
-{
- 0,
- 16 + (1008 % 4),
- 16 + (1008 % 8),
- 16 + (1008 % 12),
- 16 + (1008 % 16),
- 16 + (1008 % 20),
- 16 + (1008 % 24),
- 16 + (1008 % 28),
- 16 + (1008 % 32),
- 16 + (1008 % 36),
- 16 + (1008 % 40),
- 16 + (1008 % 44),
- 16 + (1008 % 48),
- 16 + (1008 % 52),
- 16 + (1008 % 56),
- 16 + (1008 % 60)
-};
-
-void RHybridHeap::TreeWalk(slab* const* root, void (*f)(slab*, struct HeapInfo*, SWalkInfo*), struct HeapInfo* i, SWalkInfo* wi)
-{
- // iterative walk around the tree at root
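-   // Editor's note: this is an in-order traversal that needs no stack; the
-   // iParent links point at the child field inside the parent slab, so
-   // slab::SlabFor(pp), rounding down to the slab boundary, recovers the
-   // parent slab when walking back up.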
-
- slab* s = *root;
- if (!s)
- return;
-
- for (;;)
- {
- slab* c;
- while ((c = s->iChild1) != 0)
- s = c; // walk down left side to end
- for (;;)
- {
- f(s, i, wi);
- c = s->iChild2;
- if (c)
- { // one step down right side, now try and walk down left
- s = c;
- break;
- }
- for (;;)
- { // loop to walk up right side
- slab** pp = s->iParent;
- if (pp == root)
- return;
- s = slab::SlabFor(pp);
- if (pp == &s->iChild1)
- break;
- }
- }
- }
-}
-
-void RHybridHeap::SlabEmptyInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
-{
- Walk(wi, s, SLABSIZE, EGoodFreeCell, EEmptySlab); // Introduce an empty slab to the walk function
- int nslab = slab_bitcount[SlabHeaderPagemap(page::PageFor(s)->iSlabs[0].iHeader)];
- i->iFreeN += nslab;
- i->iFreeBytes += nslab << SLABSHIFT;
-}
-
-void RHybridHeap::SlabPartialInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
-{
-   Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EPartialFullSlab); // Introduce a partially full slab to the walk function
- unsigned h = s->iHeader;
- unsigned used = SlabHeaderUsedm4(h)+4;
- unsigned size = SlabHeaderSize(h);
- unsigned free = 1024 - slab_ext_frag[size>>2] - used;
- i->iFreeN += (free/size);
- i->iFreeBytes += free;
- i->iAllocN += (used/size);
- i->iAllocBytes += used;
-}
-
-void RHybridHeap::SlabFullInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
-{
- Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EFullSlab); // Introduce a full slab to the walk function
- unsigned h = s->iHeader;
- unsigned used = SlabHeaderUsedm4(h)+4;
- unsigned size = SlabHeaderSize(h);
- HEAP_ASSERT(1024 - slab_ext_frag[size>>2] - used == 0);
- i->iAllocN += (used/size);
- i->iAllocBytes += used;
-}
-
-void RHybridHeap::SlabInfo(struct HeapInfo* i, SWalkInfo* wi) const
-{
- if (iSparePage)
- {
- i->iFreeBytes += iPageSize;
- i->iFreeN = 4;
- Walk(wi, iSparePage, iPageSize, EGoodFreeCell, ESlabSpare); // Introduce Slab spare page to the walk function
- }
- TreeWalk(&iFullSlab, &SlabFullInfo, i, wi);
- for (int ix = 0; ix < (MAXSLABSIZE>>2); ++ix)
- TreeWalk(&iSlabAlloc[ix].iPartial, &SlabPartialInfo, i, wi);
- TreeWalk(&iPartialPage, &SlabEmptyInfo, i, wi);
-}
-
-
-//
-// Bitmap class implementation for large page allocator
-//
-inline unsigned char* paged_bitmap::Addr() const {return iBase;}
-inline unsigned paged_bitmap::Size() const {return iNbits;}
-//
-
-void paged_bitmap::Init(unsigned char* p, unsigned size, unsigned bit)
-{
- iBase = p;
- iNbits=size;
- int bytes=Ceiling(size,8)>>3;
- memset(p,bit?0xff:0,bytes);
-}
-
-inline void paged_bitmap::Set(unsigned ix, unsigned bit)
-{
- if (bit)
- iBase[ix>>3] |= (1<<(ix&7));
- else
- iBase[ix>>3] &= ~(1<<(ix&7));
-}
-
-inline unsigned paged_bitmap::operator[](unsigned ix) const
-{
- return 1U&(iBase[ix>>3] >> (ix&7));
-}
-
-void paged_bitmap::Setn(unsigned ix, unsigned len, unsigned bit)
-{
- int l=len;
- while (--l>=0)
- Set(ix++,bit);
-}
-
-void paged_bitmap::Set(unsigned ix, unsigned len, unsigned val)
-{
- int l=len;
- while (--l>=0)
- {
- Set(ix++,val&1);
- val>>=1;
- }
-}
-
-unsigned paged_bitmap::Bits(unsigned ix, unsigned len) const
-{
- int l=len;
- unsigned val=0;
- unsigned bit=0;
- while (--l>=0)
- val |= (*this)[ix++]<<bit++;
- return val;
-}
-
-bool paged_bitmap::Is(unsigned ix, unsigned len, unsigned bit) const
-{
- unsigned i2 = ix+len;
- if (i2 > iNbits)
- return false;
- for (;;)
- {
- if ((*this)[ix] != bit)
- return false;
- if (++ix==i2)
- return true;
- }
-}
-
-int paged_bitmap::Find(unsigned start, unsigned bit) const
-{
- if (start<iNbits) do
- {
- if ((*this)[start]==bit)
- return start;
- } while (++start<iNbits);
- return -1;
-}
-
-
-//
-// Page allocator code
-//
-void RHybridHeap::PagedInit(TInt aPagePower)
-{
- if (aPagePower > 0)
- {
- if (aPagePower < MINPAGEPOWER)
- aPagePower = MINPAGEPOWER;
- }
- else aPagePower = 31;
-
- iPageThreshold = aPagePower;
- /*-------------------------------------------------------------
- * Initialize page bitmap
- *-------------------------------------------------------------*/
- iPageMap.Init((unsigned char*)&iBitMapBuffer, MAXSMALLPAGEBITS, 0);
-}
-
-void* RHybridHeap::PagedAllocate(unsigned size)
-{
- TInt nbytes = Ceiling(size, iPageSize);
- void* p = Map(0, nbytes);
- if (!p)
- return 0;
- if (!PagedSetSize(p, nbytes))
- {
- Unmap(p, nbytes);
- return 0;
- }
- return p;
-}
-
-void* RHybridHeap::PagedReallocate(void* p, unsigned size, TInt mode)
-{
-
- HEAP_ASSERT(Ceiling(p, iPageSize) == p);
- unsigned nbytes = Ceiling(size, iPageSize);
-
- unsigned osize = PagedSize(p);
- if ( nbytes == 0 ) // Special case to handle shrinking below min page threshold
- nbytes = Min((1 << MINPAGEPOWER), osize);
-
- if (osize == nbytes)
- return p;
-
- if (nbytes < osize)
- { // shrink in place, unmap final pages and rewrite the pagemap
- Unmap(Offset(p, nbytes), osize-nbytes);
- // zap old code and then write new code (will not fail)
- PagedZapSize(p, osize);
-
- TBool check = PagedSetSize(p, nbytes);
- __ASSERT_ALWAYS(check, HEAP_PANIC(ETHeapBadCellAddress));
-
- return p;
- }
-
- // nbytes > osize
- // try and extend current region first
-
- void* newp = Map(Offset(p, osize), nbytes-osize);
- if (newp)
-   { // In-place growth; note that the pagemap itself may have to grow, and that growth can still fail
- if (!PagedSetSize(p, nbytes))
- { // must release extra mapping
- Unmap(Offset(p, osize), nbytes-osize);
- return 0;
- }
- // if successful, the new length code will have overwritten the old one (it is at least as long)
- return p;
- }
-
- // fallback to allocate/copy/free
- if (mode & ENeverMove)
- return 0; // not allowed to move cell
-
- newp = PagedAllocate(nbytes);
- if (!newp)
- return 0;
- memcpy(newp, p, osize);
- PagedFree(p);
- return newp;
-}
-
-void RHybridHeap::PagedFree(void* p)
-{
- HEAP_ASSERT(Ceiling(p, iPageSize) == p);
-
-
- unsigned size = PagedSize(p);
-
- PagedZapSize(p, size); // clear page map
- Unmap(p, size);
-}
-
-void RHybridHeap::PagedInfo(struct HeapInfo* i, SWalkInfo* wi) const
-{
- for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
- {
- int npage = PagedDecode(ix);
- // Introduce paged buffer to the walk function
- TAny* bfr = Bitmap2addr(ix);
- int len = npage << PAGESHIFT;
- if ( len > iPageSize )
-        { // Buffers of one page or less are slab pages stored in the bitmap, so only larger ones count as page allocations
- i->iAllocBytes += len;
- ++i->iAllocN;
- Walk(wi, bfr, len, EGoodAllocatedCell, EPageAllocator);
- }
- ix += (npage<<1);
- }
-}
-
-void RHybridHeap::ResetBitmap()
-/*---------------------------------------------------------
- * Go through paged_bitmap and unmap all buffers to system
- * This method is called from RHybridHeap::Reset() to unmap all page-
- * allocated buffers - and the slab pages stored in the bitmap, too
- *---------------------------------------------------------*/
-{
- unsigned iNbits = iPageMap.Size();
- if ( iNbits )
- {
- for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
- {
- int npage = PagedDecode(ix);
- void* p = Bitmap2addr(ix);
- unsigned size = PagedSize(p);
- PagedZapSize(p, size); // clear page map
- Unmap(p, size);
- ix += (npage<<1);
- }
- if ( (TInt)iNbits > MAXSMALLPAGEBITS )
- {
- // unmap page reserved for enlarged bitmap
- Unmap(iPageMap.Addr(), (iNbits >> 3) );
- }
- }
-}
-
-TBool RHybridHeap::CheckBitmap(void* aBfr, TInt aSize, TUint32& aDummy, TInt& aNPages)
-/*---------------------------------------------------------
- * If aBfr = NULL
- *   Go through the paged_bitmap, count the mapped pages and assure that
- *   every buffer is still accessible by reading the first word of each
- *   of its pages
- * else
- *   Assure that the specified buffer is mapped with the correct length in
- *   the page map
- *---------------------------------------------------------*/
-{
- TBool ret;
- if ( aBfr )
- {
- __ASSERT_ALWAYS((Ceiling(aBfr, iPageSize) == aBfr), HEAP_PANIC(ETHeapBadCellAddress));
- ret = ( aSize == (TInt)PagedSize(aBfr));
- }
- else
- {
- ret = ETrue;
- unsigned iNbits = iPageMap.Size();
- if ( iNbits )
- {
- TInt npage;
- aNPages = 0;
- for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
- {
- npage = PagedDecode(ix);
- aNPages += npage;
- void* p = Bitmap2addr(ix);
- __ASSERT_ALWAYS((Ceiling(p, iPageSize) == p), HEAP_PANIC(ETHeapBadCellAddress));
- unsigned s = PagedSize(p);
- __ASSERT_ALWAYS((Ceiling(s, iPageSize) == s), HEAP_PANIC(ETHeapBadCellAddress));
- while ( s )
- {
- aDummy += *(TUint32*)((TUint8*)p + (s-iPageSize));
- s -= iPageSize;
- }
- ix += (npage<<1);
- }
- if ( (TInt)iNbits > MAXSMALLPAGEBITS )
- {
- // add enlarged bitmap page(s) to total page count
- npage = (iNbits >> 3);
- __ASSERT_ALWAYS((Ceiling(npage, iPageSize) == npage), HEAP_PANIC(ETHeapBadCellAddress));
- aNPages += (npage / iPageSize);
- }
- }
- }
-
- return ret;
-}
-
-
-// The paged allocations are tracked in a bitmap which has 2 bits per page
-// this allows us to store allocations as small as 4KB
-// The presence and size of an allocation is encoded as follows:
-// let N = number of pages in the allocation, then
-// 10 : N = 1 // 4KB
-// 110n : N = 2 + n // 8-12KB
-// 1110nnnn : N = nnnn // 16-60KB
-// 1111n[18] : N = n[18] // 64KB-1GB
-
-const struct etab { unsigned char offset, len, codelen, code;} encode_table[] =
-{
- {1,2,2,0x1},
- {2,4,3,0x3},
- {0,8,4,0x7},
- {0,22,4,0xf}
-};
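-
-// Editor's worked example (illustrative only): a 3-page allocation uses the
-// second table entry {offset=2, len=4, codelen=3, code=0x3}. PagedEncode()
-// computes bits = 0x3 | ((3 - 2) << 3) = 0xB and stores the 4 bits LSB-first,
-// i.e. the pattern 1,1,0,1 ("110n" with n = 1). PagedDecode() reads "11",
-// recognises a multi-page code, reads the next two bits as the value 2
-// (low bit 0) and returns 2 + (2 >> 1) = 3 pages.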
-
-// Return code length for the specified allocation size (assumed to be aligned to pages)
-inline unsigned paged_codelen(unsigned size, unsigned pagesz)
-{
- HEAP_ASSERT(size == Ceiling(size, pagesz));
-
- if (size == pagesz)
- return 2;
- else if (size < 4*pagesz)
- return 4;
- else if (size < 16*pagesz)
- return 8;
- else
- return 22;
-}
-
-inline const etab& paged_coding(unsigned npage)
-{
- if (npage < 4)
- return encode_table[npage>>1];
- else if (npage < 16)
- return encode_table[2];
- else
- return encode_table[3];
-}
-
-bool RHybridHeap::PagedEncode(unsigned pos, unsigned npage)
-{
- const etab& e = paged_coding(npage);
- if (pos + e.len > iPageMap.Size())
- {
- // need to grow the page bitmap to fit the cell length into the map
- // if we outgrow original bitmap buffer in RHybridHeap metadata, then just get enough pages to cover the full space:
-      // * the initial 68-byte bitmap maps (68*8*4kB)/2 = 1.1MB
-      // * a 4KB bitmap can map (4096*8*4kB)/2 = 64MB
- unsigned maxsize = Ceiling(iMaxLength, iPageSize);
- unsigned mapbits = maxsize >> (PAGESHIFT-1);
- maxsize = Ceiling(mapbits>>3, iPageSize);
- void* newb = Map(0, maxsize);
- if (!newb)
- return false;
-
- unsigned char* oldb = iPageMap.Addr();
- iPageMap.Init((unsigned char*)newb, (maxsize<<3), 0);
- memcpy(newb, oldb, Ceiling(MAXSMALLPAGEBITS,8)>>3);
- }
- // encode the allocation block size into the bitmap, starting at the bit for the start page
- unsigned bits = e.code;
- bits |= (npage - e.offset) << e.codelen;
- iPageMap.Set(pos, e.len, bits);
- return true;
-}
-
-unsigned RHybridHeap::PagedDecode(unsigned pos) const
-{
- __ASSERT_ALWAYS(pos + 2 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
-
- unsigned bits = iPageMap.Bits(pos,2);
- __ASSERT_ALWAYS(bits & 1, HEAP_PANIC(ETHeapBadCellAddress));
- bits >>= 1;
- if (bits == 0)
- return 1;
- __ASSERT_ALWAYS(pos + 4 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
- bits = iPageMap.Bits(pos+2,2);
- if ((bits & 1) == 0)
- return 2 + (bits>>1);
- else if ((bits>>1) == 0)
- {
- __ASSERT_ALWAYS(pos + 8 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
- return iPageMap.Bits(pos+4, 4);
- }
- else
- {
- __ASSERT_ALWAYS(pos + 22 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
- return iPageMap.Bits(pos+4, 18);
- }
-}
-
-inline void RHybridHeap::PagedZapSize(void* p, unsigned size)
-{iPageMap.Setn(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), paged_codelen(size, iPageSize) ,0);}
-
-inline unsigned RHybridHeap::PagedSize(void* p) const
- { return PagedDecode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1)) << PAGESHIFT; }
-
-inline bool RHybridHeap::PagedSetSize(void* p, unsigned size)
-{ return PagedEncode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), size >> PAGESHIFT); }
-
-inline void* RHybridHeap::Bitmap2addr(unsigned pos) const
- { return iMemBase + (1 << (PAGESHIFT-1))*pos; }
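-
-// Editor's note: PtrDiff(p, iMemBase) >> (PAGESHIFT-1) converts a byte
-// offset into a bitmap position because each 2^PAGESHIFT-byte page owns
-// exactly two bits in the map; Bitmap2addr() is the inverse mapping.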
-
-
-#ifndef QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
-//////////////////////////////////////////////////////////////////////////
-//////////////////////////////////////////////////////////////////////////
-//////////////////////////////////////////////////////////////////////////
-/**
-Constructor where minimum and maximum length of the heap can be defined.
-It defaults the chunk heap to be created to have use a new local chunk,
-to have a grow by value of KMinHeapGrowBy, to be unaligned, not to be
-single threaded and not to have any mode flags set.
-
-@param aMinLength The minimum length of the heap to be created.
-@param aMaxLength The maximum length to which the heap to be created can grow.
- If the supplied value is less than a page size, then it
- is discarded and the page size is used instead.
-*/
-EXPORT_C TChunkHeapCreateInfo::TChunkHeapCreateInfo(TInt aMinLength, TInt aMaxLength) :
- iVersionNumber(EVersion0), iMinLength(aMinLength), iMaxLength(aMaxLength),
-   iAlign(0), iGrowBy(1), iSingleThread(EFalse),
-   iOffset(0), iPaging(EUnspecified), iMode(0), iName(NULL)
-{
-}
-
-
-/**
-Sets the chunk heap to create a new chunk with the specified name.
-
-This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
-TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.
-
-@param aName The name to be given to the chunk heap to be created.
-             If NULL, the function constructs a local chunk to host the heap.
-             If not NULL, a pointer to a descriptor containing the name to be
-             assigned to the global chunk hosting the heap.
-*/
-EXPORT_C void TChunkHeapCreateInfo::SetCreateChunk(const TDesC* aName)
-{
- iName = (TDesC*)aName;
- iChunk.SetHandle(KNullHandle);
-}
-
-
-/**
-Sets the chunk heap to be created to use the chunk specified.
-
-This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
-TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.
-
-@param aChunk A handle to the chunk to use for the heap.
-*/
-EXPORT_C void TChunkHeapCreateInfo::SetUseChunk(const RChunk aChunk)
-{
- iName = NULL;
- iChunk = aChunk;
-}
-
-EXPORT_C RHeap* UserHeap::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
-/**
-Creates a fixed length heap at a specified location.
-
-On successful return from this function, the heap is ready to use. This assumes that
-the memory pointed to by aBase is mapped and able to be used. You must ensure that you
-pass in a large enough value for aMaxLength. Passing in a value that is too small to
-hold the metadata for the heap (~1 KB) will result in the size being rounded up and the
-heap thereby running over the end of the memory assigned to it. But then if you were to
-pass in such a small value then you would not be able to do any allocations from the
-heap anyway. Moral of the story: Use a sensible value for aMaxLength!
-
-@param aBase A pointer to the location where the heap is to be constructed.
-@param aMaxLength The maximum length in bytes to which the heap can grow. If the
- supplied value is too small to hold the heap's metadata, it
- will be increased.
-@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
- byte alignment is guaranteed for all allocations 8 bytes or
- more in size. 4 byte allocations will be aligned to a 4
- byte boundary. Best to pass in zero.
-@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
- This will cause internal locks to be created, guaranteeing
- thread safety.
-
-@return A pointer to the new heap, or NULL if the heap could not be created.
-
-@panic USER 56 if aMaxLength is negative.
-*/
-{
- __ASSERT_ALWAYS( aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
- if ( aMaxLength < (TInt)sizeof(RHybridHeap) )
- aMaxLength = sizeof(RHybridHeap);
-
- RHybridHeap* h = new(aBase) RHybridHeap(aMaxLength, aAlign, aSingleThread);
-
- if (!aSingleThread)
- {
- TInt r = h->iLock.CreateLocal();
- if (r!=KErrNone)
- return NULL; // No need to delete the RHybridHeap instance as the new above is only a placement new
- h->iHandles = (TInt*)&h->iLock;
- h->iHandleCount = 1;
- }
- return h;
-}
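-
-/*
-Editor's usage sketch (hypothetical, not part of the original source):
-
-   static TUint8 heapMem[0x10000];                     // backing store
-   RHeap* heap = UserHeap::FixedHeap(heapMem, sizeof(heapMem));
-   if (heap)
-      {
-      TAny* cell = heap->Alloc(64);                    // allocate from the fixed heap
-      heap->Free(cell);
-      }
-*/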
-
-/**
-Creates a chunk heap of the type specified by the parameter aCreateInfo.
-
-@param aCreateInfo A reference to a TChunkHeapCreateInfo object specifying the
-type of chunk heap to create.
-
-@return A pointer to the new heap or NULL if the heap could not be created.
-
-@panic USER 41 if the heap's specified minimum length is greater than the specified maximum length.
-@panic USER 55 if the heap's specified minimum length is negative.
-@panic USER 172 if the heap's specified alignment is not a power of 2 or is less than the size of a TAny*.
-*/
-EXPORT_C RHeap* UserHeap::ChunkHeap(const TChunkHeapCreateInfo& aCreateInfo)
-{
-   // aCreateInfo must have been configured to use a new chunk or an existing chunk.
- __ASSERT_ALWAYS(!(aCreateInfo.iMode & (TUint32)~EChunkHeapMask), ::Panic(EHeapCreateInvalidMode));
- RHeap* h = NULL;
-
- if (aCreateInfo.iChunk.Handle() == KNullHandle)
- {
- // A new chunk is to be created for this heap.
-
- __ASSERT_ALWAYS(aCreateInfo.iMinLength >= 0, ::Panic(ETHeapMinLengthNegative));
- __ASSERT_ALWAYS(aCreateInfo.iMaxLength >= aCreateInfo.iMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
-
- TInt maxLength = aCreateInfo.iMaxLength;
- TInt page_size;
- GET_PAGE_SIZE(page_size);
-
- if (maxLength < page_size)
- maxLength = page_size;
-
- TChunkCreateInfo chunkInfo;
-#if USE_HYBRID_HEAP
- if ( aCreateInfo.iOffset )
- chunkInfo.SetNormal(0, maxLength); // Create DL only heap
- else
- {
- maxLength = 2*maxLength;
- chunkInfo.SetDisconnected(0, 0, maxLength); // Create hybrid heap
- }
-#else
- chunkInfo.SetNormal(0, maxLength); // Create DL only heap
-#endif
- chunkInfo.SetOwner((aCreateInfo.iSingleThread)? EOwnerThread : EOwnerProcess);
- if (aCreateInfo.iName)
- chunkInfo.SetGlobal(*aCreateInfo.iName);
- // Set the paging attributes of the chunk.
- if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EPaged)
- chunkInfo.SetPaging(TChunkCreateInfo::EPaged);
- if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EUnpaged)
- chunkInfo.SetPaging(TChunkCreateInfo::EUnpaged);
- // Create the chunk.
- RChunk chunk;
- if (chunk.Create(chunkInfo) != KErrNone)
- return NULL;
- // Create the heap using the new chunk.
- TUint mode = aCreateInfo.iMode | EChunkHeapDuplicate; // Must duplicate the handle.
- h = OffsetChunkHeap(chunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
- aCreateInfo.iGrowBy, maxLength, aCreateInfo.iAlign,
- aCreateInfo.iSingleThread, mode);
- chunk.Close();
- }
- else
- {
- h = OffsetChunkHeap(aCreateInfo.iChunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
- aCreateInfo.iGrowBy, aCreateInfo.iMaxLength, aCreateInfo.iAlign,
- aCreateInfo.iSingleThread, aCreateInfo.iMode);
- }
- return h;
-}
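-
-/*
-Editor's usage sketch (hypothetical, not part of the original source):
-
-   TChunkHeapCreateInfo createInfo(0x1000, 0x100000);  // min 4KB, max 1MB
-   createInfo.SetCreateChunk(NULL);                    // host the heap in a new local chunk
-   RHeap* heap = UserHeap::ChunkHeap(createInfo);
-   if (heap)
-      {
-      // ... use the heap ...
-      }
-*/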
-
-
-
-EXPORT_C RHeap* UserHeap::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
-/**
-Creates a heap in a local or global chunk.
-
-The chunk hosting the heap can be local or global.
-
-A local chunk is one which is private to the process creating it and is not
-intended for access by other user processes. A global chunk is one which is
-visible to all processes.
-
-The hosting chunk is local, if the pointer aName is NULL, otherwise the
-hosting chunk is global and the descriptor *aName is assumed to contain
-the name to be assigned to it.
-
-Ownership of the host chunk is vested in the current process.
-
-A minimum and a maximum size for the heap can be specified. On successful
-return from this function, the size of the heap is at least aMinLength.
-If subsequent requests for allocation of memory from the heap cannot be
-satisfied by compressing the heap, the size of the heap is extended in
-increments of aGrowBy until the request can be satisfied. Attempts to extend
-the heap causes the size of the host chunk to be adjusted.
-
-Note that the size of the heap cannot be adjusted by more than aMaxLength.
-
-@param aName If NULL, the function constructs a local chunk to host
- the heap. If not NULL, a pointer to a descriptor containing
- the name to be assigned to the global chunk hosting the heap.
-@param aMinLength The minimum length of the heap in bytes. This will be
- rounded up to the nearest page size by the allocator.
-@param aMaxLength The maximum length in bytes to which the heap can grow. This
- will be rounded up to the nearest page size by the allocator.
-@param aGrowBy The number of bytes by which the heap will grow when more
- memory is required. This will be rounded up to the nearest
- page size by the allocator. If a value is not explicitly
- specified, the page size is taken by default.
-@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
- byte alignment is guaranteed for all allocations 8 bytes or
- more in size. 4 byte allocations will be aligned to a 4
- byte boundary. Best to pass in zero.
-@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
- This will cause internal locks to be created, guaranteeing
- thread safety.
-
-@return A pointer to the new heap or NULL if the heap could not be created.
-
-@panic USER 41 if aMaxLength is < aMinLength.
-@panic USER 55 if aMinLength is negative.
-@panic USER 56 if aMaxLength is negative.
-*/
- {
- TInt page_size;
- GET_PAGE_SIZE(page_size);
- TInt minLength = _ALIGN_UP(aMinLength, page_size);
- TInt maxLength = Max(aMaxLength, minLength);
-
- TChunkHeapCreateInfo createInfo(minLength, maxLength);
- createInfo.SetCreateChunk(aName);
- createInfo.SetGrowBy(aGrowBy);
- createInfo.SetAlignment(aAlign);
- createInfo.SetSingleThread(aSingleThread);
-
- return ChunkHeap(createInfo);
- }
-#endif // QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
-
-EXPORT_C RHeap* UserHeap::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
-/**
-Creates a heap in an existing chunk.
-
-This function is intended to be used to create a heap in a user writable code
-chunk as created by a call to RChunk::CreateLocalCode(). This type of heap can
-be used to hold code fragments from a JIT compiler.
-
-@param aChunk The chunk that will host the heap.
-@param aMinLength The minimum length of the heap in bytes. This will be
- rounded up to the nearest page size by the allocator.
-@param aGrowBy The number of bytes by which the heap will grow when more
- memory is required. This will be rounded up to the nearest
- page size by the allocator. If a value is not explicitly
- specified, the page size is taken by default.
-@param aMaxLength The maximum length in bytes to which the heap can grow. This
- will be rounded up to the nearest page size by the allocator.
-                    If 0 is passed in, the maximum length of the chunk is used.
-@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
- byte alignment is guaranteed for all allocations 8 bytes or
- more in size. 4 byte allocations will be aligned to a 4
- byte boundary. Best to pass in zero.
-@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
- This will cause internal locks to be created, guaranteeing
- thread safety.
-@param aMode Flags controlling the heap creation. See RAllocator::TFlags.
-
-@return A pointer to the new heap or NULL if the heap could not be created.
-
-@see UserHeap::OffsetChunkHeap()
-*/
- {
- return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
- }
-
-EXPORT_C RHeap* UserHeap::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
-/**
-Creates a heap in an existing chunk, offset from the beginning of the chunk.
-
-This function is intended to be used to create a heap using a chunk which has
-some of its memory already used, at the start of that chunk. The maximum
-length to which the heap can grow is the maximum size of the chunk, minus the
-data at the start of the chunk.
-
-The offset at which to create the heap is passed in as the aOffset parameter.
-Legacy heap implementations always respected the aOffset value; however, more
-modern heap implementations are more sophisticated and cannot necessarily respect
-this value. Therefore, if possible, you should always use an aOffset of 0 unless
-you have a very explicit requirement for using a non zero value. Using a non zero
-value will result in a less efficient heap algorithm being used in order to respect
-the offset.
-
-Another issue to consider when using this function is the type of the chunk passed
-in. In order for the most efficient heap algorithms to be used, the chunk passed
-in should always be a disconnected chunk. Passing in a non disconnected chunk will
-again result in a less efficient heap algorithm being used.
-
-Finally, another requirement for the most efficient heap algorithms to be used is
-for the heap to be able to expand. Therefore, unless you have a specific reason to
-do so, always specify aMaxLength > aMinLength.
-
-So, if possible, use aOffset == zero, aMaxLength > aMinLength and a disconnected
-chunk for best results!
-
-@param aChunk The chunk that will host the heap.
-@param aMinLength The minimum length of the heap in bytes. This will be
- rounded up to the nearest page size by the allocator.
-@param aOffset The offset in bytes from the start of the chunk at which to
- create the heap. If used (and it shouldn't really be!)
- then it will be rounded up to a multiple of 8, to respect
- EABI 8 byte alignment requirements.
-@param aGrowBy The number of bytes by which the heap will grow when more
- memory is required. This will be rounded up to the nearest
- page size by the allocator. If a value is not explicitly
- specified, the page size is taken by default.
-@param aMaxLength The maximum length in bytes to which the heap can grow. This
- will be rounded up to the nearest page size by the allocator.
- If 0 is passed in, the maximum length of the chunk is used.
-@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
- byte alignment is guaranteed for all allocations 8 bytes or
- more in size. 4 byte allocations will be aligned to a 4
- byte boundary. Best to pass in zero.
-@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
- This will cause internal locks to be created, guaranteeing
- thread safety.
-@param aMode Flags controlling the heap creation. See RAllocator::TFlags.
-
-@return A pointer to the new heap or NULL if the heap could not be created.
-
-@panic USER 41 if aMaxLength is < aMinLength.
-@panic USER 55 if aMinLength is negative.
-@panic USER 56 if aMaxLength is negative.
-@panic USER 168 if aOffset is negative.
-*/
- {
- TBool dlOnly = EFalse;
- TInt pageSize;
- GET_PAGE_SIZE(pageSize);
- TInt align = RHybridHeap::ECellAlignment; // Always use EABI 8 byte alignment
-
- __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
- __ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
-
- if ( aMaxLength > 0 )
- __ASSERT_ALWAYS(aMaxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
-
- // Stick to EABI alignment for the start offset, if any
- aOffset = _ALIGN_UP(aOffset, align);
-
- // Using an aOffset > 0 means that we can't use the hybrid allocator and have to revert to Doug Lea only
- if (aOffset > 0)
- dlOnly = ETrue;
-
- // Ensure that the minimum length is enough to hold the RHybridHeap object itself
- TInt minCell = _ALIGN_UP(Max((TInt)RHybridHeap::EAllocCellSize, (TInt)RHybridHeap::EFreeCellSize), align);
- TInt hybridHeapSize = (sizeof(RHybridHeap) + minCell);
- if (aMinLength < hybridHeapSize)
- aMinLength = hybridHeapSize;
-
- // Round the minimum length up to a multiple of the page size, taking into account that the
- // offset takes up a part of the chunk's memory
- aMinLength = _ALIGN_UP((aMinLength + aOffset), pageSize);
-
- // If aMaxLength is 0 then use the entire chunk
- TInt chunkSize = aChunk.MaxSize();
- if (aMaxLength == 0)
- {
- aMaxLength = chunkSize;
- }
- // Otherwise round the maximum length up to a multiple of the page size, taking into account that
- // the offset takes up a part of the chunk's memory. We also clip the maximum length to the chunk
- // size, so the user may get a little less than requested if the chunk size is not large enough
- else
- {
- aMaxLength = _ALIGN_UP((aMaxLength + aOffset), pageSize);
- if (aMaxLength > chunkSize)
- aMaxLength = chunkSize;
- }
-
- // If the rounded up values don't make sense then a crazy aMinLength or aOffset must have been passed
- // in, so fail the heap creation
- if (aMinLength > aMaxLength)
- return NULL;
-
- // Adding the offset into the minimum and maximum length was only necessary for ensuring a good fit of
- // the heap into the chunk. Re-adjust them now back to non offset relative sizes
- aMinLength -= aOffset;
- aMaxLength -= aOffset;
-
-   // If we are still creating the hybrid allocator (the aOffset parameter
-   // is 0 and aMaxLength > aMinLength), we must halve aMaxLength and set
-   // the offset to point at the middle of the chunk.
- TInt offset = aOffset;
- TInt maxLength = aMaxLength;
- if (!dlOnly && (aMaxLength > aMinLength))
- maxLength = offset = _ALIGN_UP(aMaxLength >> 1, pageSize);
-
- // Try to use commit to map aMinLength physical memory for the heap, taking into account the offset. If
-   // the operation fails, suppose that the chunk is not a disconnected chunk and try to map physical memory
- // with adjust. In this case, we also can't use the hybrid allocator and have to revert to Doug Lea only
- TBool useAdjust = EFalse;
- TInt r = aChunk.Commit(offset, aMinLength);
- if (r == KErrGeneral)
- {
- dlOnly = useAdjust = ETrue;
- r = aChunk.Adjust(aMinLength);
- if (r != KErrNone)
- return NULL;
- }
- else if (r == KErrNone)
- {
-      // We have a disconnected chunk; reset aOffset and aMaxLength
- aOffset = offset;
- aMaxLength = maxLength;
- }
-
- else
- return NULL;
-
- // Parameters have been mostly verified and we know whether to use the hybrid allocator or Doug Lea only. The
- // constructor for the hybrid heap will automatically drop back to Doug Lea if it determines that aMinLength
- // == aMaxLength, so no need to worry about that requirement here. The user specified alignment is not used but
- // is passed in so that it can be sanity checked in case the user is doing something totally crazy with it
- RHybridHeap* h = new (aChunk.Base() + aOffset) RHybridHeap(aChunk.Handle(), aOffset, aMinLength, aMaxLength,
- aGrowBy, aAlign, aSingleThread, dlOnly, useAdjust);
-
- if (h->ConstructLock(aMode) != KErrNone)
- return NULL;
-
- // Return the heap address
- return h;
- }
-
-#define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))
-
-_LIT(KLitDollarHeap,"$HEAP");
-EXPORT_C TInt UserHeap::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RHeap*& aHeap, TInt aAlign, TBool aSingleThread)
-/**
-@internalComponent
-*/
-//
-// Create a user-side heap
-//
-{
- TInt page_size;
- GET_PAGE_SIZE(page_size);
- TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
- TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
-#ifdef ENABLE_BTRACE
- if (UserTestDebugMaskBit(96)) // 96 == KUSERHEAPTRACE in nk_trace.h
- aInfo.iFlags |= ETraceHeapAllocs;
-#endif // ENABLE_BTRACE
- // Create the thread's heap chunk.
- RChunk c;
-#ifndef NO_NAMED_LOCAL_CHUNKS
- TChunkCreateInfo createInfo;
-
- createInfo.SetThreadHeap(0, maxLength, KLitDollarHeap()); // Initialise with no memory committed.
-#if USE_HYBRID_HEAP
- //
- // Create disconnected chunk for hybrid heap with double max length value
- //
- maxLength = 2*maxLength;
- createInfo.SetDisconnected(0, 0, maxLength);
-#endif
-#ifdef SYMBIAN_WRITABLE_DATA_PAGING
- // Set the paging policy of the heap chunk based on the thread's paging policy.
- TUint pagingflags = aInfo.iFlags & EThreadCreateFlagPagingMask;
- switch (pagingflags)
- {
- case EThreadCreateFlagPaged:
- createInfo.SetPaging(TChunkCreateInfo::EPaged);
- break;
- case EThreadCreateFlagUnpaged:
- createInfo.SetPaging(TChunkCreateInfo::EUnpaged);
- break;
- case EThreadCreateFlagPagingUnspec:
- // Leave the chunk paging policy unspecified so the process's
- // paging policy is used.
- break;
- }
-#endif // SYMBIAN_WRITABLE_DATA_PAGING
-
- TInt r = c.Create(createInfo);
-#else
- TInt r = c.CreateDisconnectedLocal(0, 0, maxLength * 2);
-#endif
- if (r!=KErrNone)
- return r;
-
- aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, EChunkHeapSwitchTo|EChunkHeapDuplicate);
- c.Close();
-
- if ( !aHeap )
- return KErrNoMemory;
-
-#ifdef ENABLE_BTRACE
- if (aInfo.iFlags & ETraceHeapAllocs)
- {
- aHeap->iFlags |= RHeap::ETraceAllocs;
- BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate,(TUint32)aHeap, RHybridHeap::EAllocCellSize);
- TInt chunkId = ((RHandleBase&)((RHybridHeap*)aHeap)->iChunkHandle).BTraceId();
- BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
- }
- if (aInfo.iFlags & EMonitorHeapMemory)
- aHeap->iFlags |= RHeap::EMonitorMemory;
-#endif // ENABLE_BTRACE
-
- return KErrNone;
-}
-
-#endif // __KERNEL_MODE__
-
-#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#ifndef __HEAP_HYBRID_H__
-#define __HEAP_HYBRID_H__
-
-#include <e32cmn.h>
-
-#ifdef __WINS__
-#define USE_HYBRID_HEAP 0
-#else
-#define USE_HYBRID_HEAP 1
-#endif
-
-// This stuff is all temporary in order to prevent having to include dla.h from heap_hybrid.h, which causes
-// problems due to its definition of size_t (and possibly other types). This is unfortunate but we cannot
-// pollute the namespace with these types or it will cause problems with Open C and other POSIX compatibility
-// efforts in Symbian
-
-#define NSMALLBINS (32U)
-#define NTREEBINS (32U)
-
-#ifndef MALLOC_ALIGNMENT
- #define MALLOC_ALIGNMENT ((TUint)8U)
-#endif /* MALLOC_ALIGNMENT */
-
-#define CHUNK_OVERHEAD (sizeof(TUint))
-
-typedef unsigned int bindex_t;
-typedef unsigned int binmap_t;
-typedef struct malloc_chunk* mchunkptr;
-typedef struct malloc_segment msegment;
-typedef struct malloc_state* mstate;
-typedef struct malloc_tree_chunk* tbinptr;
-typedef struct malloc_tree_chunk* tchunkptr;
-
-struct malloc_segment {
- TUint8* iBase; /* base address */
- TUint iSize; /* allocated size */
-};
-
-struct malloc_state {
- binmap_t iSmallMap;
- binmap_t iTreeMap;
- TUint iDvSize;
- TUint iTopSize;
- mchunkptr iDv;
- mchunkptr iTop;
- TUint iTrimCheck;
- mchunkptr iSmallBins[(NSMALLBINS+1)*2];
- tbinptr iTreeBins[NTREEBINS];
- msegment iSeg;
- };
-
-class RHybridHeap : public RHeap
- {
-
-public:
- // declarations copied from Symbian^4 RAllocator and RHeap
- typedef void (*TWalkFunc)(TAny*, RHeap::TCellType, TAny*, TInt);
- enum TFlags {ESingleThreaded=1, EFixedSize=2, ETraceAllocs=4, EMonitorMemory=8,};
- enum TAllocDebugOp
- {
- ECount, EMarkStart, EMarkEnd, ECheck, ESetFail, ECopyDebugInfo, ESetBurstFail, EGetFail,
- EGetSize=48, EGetMaxLength, EGetBase, EAlignInteger, EAlignAddr
- };
- enum TDebugOp { EWalk = 128, EHybridHeap };
- enum THybridAllocFail
- {
- ERandom, ETrueRandom, EDeterministic, EHybridNone, EFailNext, EReset, EBurstRandom,
- EBurstTrueRandom, EBurstDeterministic, EBurstFailNext, ECheckFailure,
- };
- enum { EDebugHdrSize = sizeof(SDebugCell) };
-#ifndef SYMBIAN_ENABLE_SPLIT_HEADERS
- struct SRAllocatorBurstFail {TInt iBurst; TInt iRate; TInt iUnused[2];};
-#endif
-
- struct HeapInfo
- {
- unsigned iFootprint;
- unsigned iMaxSize;
- unsigned iAllocBytes;
- unsigned iAllocN;
- unsigned iFreeBytes;
- unsigned iFreeN;
- };
-
- struct SHeapCellInfo { RHybridHeap* iHeap; TInt iTotalAlloc; TInt iTotalAllocSize; TInt iTotalFree; TInt iLevelAlloc; SDebugCell* iStranded; };
-
-
- /**
- @internalComponent
- */
- enum TAllocatorType
- {ESlabAllocator, EDougLeaAllocator, EPageAllocator, EFullSlab=0x80, EPartialFullSlab=0x40, EEmptySlab=0x20, ESlabSpare=0x10, ESlabMask=0xf0};
-
-
- /**
- @internalComponent
- */
- struct SWalkInfo {
- /**
-      Address of the walk function to be called
- */
- TWalkFunc iFunction;
-
- /**
-      The first parameter for the callback function
- */
- TAny* iParam;
- /**
- Pointer to RHybridHeap object
- */
- RHybridHeap* iHeap;
- };
-
- /**
- @internalComponent
- */
- struct SConfig {
- /**
-      Required slab configuration (bit 0 = 4 bytes, bit 1 = 8 bytes, ...,
-      bit 13 = 56 bytes)
- */
- TUint32 iSlabBits;
- /**
- Delayed slab threshold in bytes (0 = no threshold)
- */
- TInt iDelayedSlabThreshold;
- /**
-      2^n is the smallest size allocated by the paged allocator (n = 14-31, i.e. 16KB and upwards)
- */
- TInt iPagePower;
-
- };
-
- /**
- @internalComponent
-
- This structure is used by test code for configuring the allocators and obtaining information
- from them in order to ensure they are behaving as required. This is internal test specific
- code and is liable to be changed without warning at any time. You should under no circumstances
- be using it!
- */
- struct STestCommand
- {
- TInt iCommand; // The test related command to be executed
-
- union
- {
- SConfig iConfig; // Configuration used by test code only
- TAny* iData; // Extra supporting data for the test command
- };
- };
-
- /**
- @internalComponent
-
-   Commands used by test code for configuring the allocators and obtaining information from them
- */
- enum TTestCommand { EGetConfig, ESetConfig, EHeapMetaData, ETestData };
-
- virtual TAny* Alloc(TInt aSize);
- virtual void Free(TAny* aPtr);
- virtual TAny* ReAlloc(TAny* aPtr, TInt aSize, TInt aMode=0);
- virtual TInt AllocLen(const TAny* aCell) const;
-#ifndef __KERNEL_MODE__
- virtual TInt Compress();
- virtual void Reset();
- virtual TInt AllocSize(TInt& aTotalAllocSize) const;
- virtual TInt Available(TInt& aBiggestBlock) const;
-#endif
- virtual TInt DebugFunction(TInt aFunc, TAny* a1=NULL, TAny* a2=NULL);
-protected:
- virtual TInt Extension_(TUint aExtensionId, TAny*& a0, TAny* a1);
-
-public:
- TAny* operator new(TUint aSize, TAny* aBase) __NO_THROW;
- void operator delete(TAny*, TAny*);
-
-private:
- TInt DoCountAllocFree(TInt& aFree);
- TInt DoCheckHeap(SCheckInfo* aInfo);
- void DoMarkStart();
- TUint32 DoMarkEnd(TInt aExpected);
- void DoSetAllocFail(TAllocFail aType, TInt aRate);
- TBool CheckForSimulatedAllocFail();
- void DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst);
-
- void Lock() const;
- void Unlock() const;
- TInt ChunkHandle() const;
-
- RHybridHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread, TBool aDlOnly, TBool aUseAdjust);
- RHybridHeap(TInt aMaxLength, TInt aAlign=0, TBool aSingleThread=ETrue);
- RHybridHeap();
-
- void Init(TInt aBitmapSlab, TInt aPagePower);
- inline void InitBins(mstate m);
- inline void InitTop(mstate m, mchunkptr p, TUint psize);
- void* SysAlloc(mstate m, TUint nb);
- int SysTrim(mstate m, TUint pad);
- void* TmallocLarge(mstate m, TUint nb);
- void* TmallocSmall(mstate m, TUint nb);
-   /* Functions converted from dlmalloc macros */
- static inline void UnlinkFirstSmallChunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I);
- static inline void InsertSmallChunk(mstate M,mchunkptr P, TUint S);
- static inline void InsertChunk(mstate M,mchunkptr P,TUint S);
- static inline void UnlinkLargeChunk(mstate M,tchunkptr X);
- static inline void UnlinkSmallChunk(mstate M, mchunkptr P,TUint S);
- static inline void UnlinkChunk(mstate M, mchunkptr P, TUint S);
- static inline void ComputeTreeIndex(TUint S, bindex_t& I);
- static inline void InsertLargeChunk(mstate M,tchunkptr X,TUint S);
- static inline void ReplaceDv(mstate M, mchunkptr P, TUint S);
- static inline void ComputeBit2idx(binmap_t X,bindex_t& I);
-
- void DoComputeTreeIndex(TUint S, bindex_t& I);
- void DoCheckAnyChunk(mstate m, mchunkptr p);
- void DoCheckTopChunk(mstate m, mchunkptr p);
- void DoCheckInuseChunk(mstate m, mchunkptr p);
- void DoCheckFreeChunk(mstate m, mchunkptr p);
- void DoCheckMallocedChunk(mstate m, void* mem, TUint s);
- void DoCheckTree(mstate m, tchunkptr t);
- void DoCheckTreebin(mstate m, bindex_t i);
- void DoCheckSmallbin(mstate m, bindex_t i);
- TInt BinFind(mstate m, mchunkptr x);
- TUint TraverseAndCheck(mstate m);
- void DoCheckMallocState(mstate m);
-
- TInt GetInfo(struct HeapInfo* i, SWalkInfo* wi=NULL) const;
- void InitDlMalloc(TUint capacity, int locked);
- void* DlMalloc(TUint);
- void DlFree(void*);
- void* DlRealloc(void*, TUint, TInt);
- TUint DlInfo(struct HeapInfo* i, SWalkInfo* wi) const;
- void DoCheckCommittedSize(TInt aNPages, mstate aM);
-
- TAny* ReAllocImpl(TAny* aPtr, TInt aSize, TInt aMode);
- void Construct(TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust, TInt aAlign);
-#ifndef __KERNEL_MODE__
- TInt ConstructLock(TUint32 aMode);
-#endif
-	static void Walk(SWalkInfo* aInfo, TAny* aBfr, TInt aLth, TCellType aBfrType, TAllocatorType aAllocatorType);
- static void WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen);
- void* Map(void* p, TInt sz);
- void Unmap(void* p,TInt sz);
-
-private:
- TInt iMinLength;
- TInt iOffset; // offset of RHeap object from chunk base
- TInt iGrowBy;
- TInt iMinCell;
- TInt iPageSize;
-
- // Temporarily commented out and exported from RHeap to prevent source breaks from req417-52840.
- // This will be moved with another REQ after submission and subsequent fixing of bad code
- //TInt iNestingLevel;
- TInt iAllocCount;
- // Temporarily commented out. See comment above regarding req417-52840 source breaks
- //TAllocFail iFailType;
- TInt iFailRate;
- TBool iFailed;
- TInt iFailAllocCount;
- TInt iRand;
- // Temporarily commented out. See comment above regarding req417-52840 source breaks
- //TAny* iTestData;
-
- TInt iChunkSize;
- TInt iHighWaterMark;
- TBool iUseAdjust;
- TBool iDLOnly;
-
- malloc_state iGlobalMallocState;
-
-#ifdef __KERNEL_MODE__
-
- friend class RHeapK;
-
-#else
-
- friend class UserHeap;
- friend class HybridHeap;
- friend class TestHybridHeap;
-
-private:
-
- static void TreeRemove(slab* s);
- static void TreeInsert(slab* s,slab** r);
-
- enum {EOkBits = (1<<(MAXSLABSIZE>>2))-1};
-
- void SlabInit();
- void SlabConfig(unsigned slabbitmap);
- void* SlabAllocate(slabset& allocator);
- void SlabFree(void* p);
- void* AllocNewSlab(slabset& allocator);
- void* AllocNewPage(slabset& allocator);
- void* InitNewSlab(slabset& allocator, slab* s);
- void FreeSlab(slab* s);
- void FreePage(page* p);
- void SlabInfo(struct HeapInfo* i, SWalkInfo* wi) const;
- static void SlabFullInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
- static void SlabPartialInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
- static void SlabEmptyInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
- static void TreeWalk(slab* const* root, void (*f)(slab*, struct HeapInfo*, SWalkInfo*), struct HeapInfo* i, SWalkInfo* wi);
-
- static void WalkPartialFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt aLth);
- static void WalkFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt aLth);
- void DoCheckSlab(slab* aSlab, TAllocatorType aSlabType, TAny* aBfr=NULL);
- void DoCheckSlabTrees();
- void DoCheckSlabTree(slab** aS, TBool aPartialPage);
- void BuildPartialSlabBitmap(TUint32* aBitmap, slab* aSlab, TAny* aBfr=NULL);
-
- static inline unsigned SlabHeaderFree(unsigned h)
- {return (h&0x000000ff);}
- static inline unsigned SlabHeaderPagemap(unsigned h)
- {return (h&0x00000f00)>>8;}
- static inline unsigned SlabHeaderSize(unsigned h)
- {return (h&0x0003f000)>>12;}
- static inline unsigned SlabHeaderUsedm4(unsigned h)
- {return (h&0x0ffc0000)>>18;}
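// Editor's worked example (not in the original source): decoding a slab header
// word with the accessors above. For h = 0x004C2305:
//   SlabHeaderFree(h)    == 0x05  (bits 7..0,   next free cell position)
//   SlabHeaderPagemap(h) == 0x3   (bits 11..8,  page map nibble)
//   SlabHeaderSize(h)    == 0x2   (bits 17..12, cell size field)
//   SlabHeaderUsedm4(h)  == 0x13  (bits 27..18, used count minus 4, per the name)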
- /***paged allocator code***/
- void PagedInit(TInt aPagePower);
- void* PagedAllocate(unsigned size);
- void PagedFree(void* p);
- void* PagedReallocate(void* p, unsigned size, TInt mode);
-
- bool PagedEncode(unsigned pos, unsigned npage);
- unsigned PagedDecode(unsigned pos) const;
- inline unsigned PagedSize(void* p) const;
- inline bool PagedSetSize(void* p, unsigned size);
- inline void PagedZapSize(void* p, unsigned size);
- inline void* Bitmap2addr(unsigned pos) const;
- void PagedInfo(struct HeapInfo* i, SWalkInfo* wi) const;
- void ResetBitmap();
- TBool CheckBitmap(void* aBfr, TInt aSize, TUint32& aDummy, TInt& aNPages);
-
-private:
- paged_bitmap iPageMap; // bitmap representing page allocator's pages
- TUint8* iMemBase; // bottom of paged/slab memory (chunk base)
- TUint8 iBitMapBuffer[MAXSMALLPAGEBITS>>3]; // buffer for initial page bitmap
-	TInt iSlabThreshold;     // allocations smaller than this are done by the slab allocator
- TInt iPageThreshold; // 2^n is smallest cell size allocated in paged allocator
- TInt iSlabInitThreshold; // slab allocator will be used after chunk reaches this size
- TUint32 iSlabConfigBits; // set of bits that specify which slab sizes to use
- slab* iPartialPage; // partial-use page tree
- slab* iFullSlab; // full slabs list (so we can find them when walking)
- page* iSparePage; // cached, to avoid kernel exec calls for unmapping/remapping
-	TUint8 iSizeMap[(MAXSLABSIZE>>2)+1]; // maps a size class to its slabset index
-	slabset iSlabAlloc[MAXSLABSIZE>>2];  // one slabset per size class
-
-#endif // __KERNEL_MODE__
-};
-
-#define HEAP_ASSERT(x) __ASSERT_DEBUG(x, HEAP_PANIC(ETHeapBadCellAddress))
-
-template <class T> inline T Floor(const T addr, unsigned aln)
-{return T((unsigned(addr))&~(aln-1));}
-template <class T> inline T Ceiling(T addr, unsigned aln)
-{return T((unsigned(addr)+(aln-1))&~(aln-1));}
-template <class T> inline unsigned LowBits(T addr, unsigned aln)
-{return unsigned(addr)&(aln-1);}
-template <class T1, class T2> inline int PtrDiff(const T1* a1, const T2* a2)
-{return reinterpret_cast<const unsigned char*>(a1) - reinterpret_cast<const unsigned char*>(a2);}
-template <class T> inline T Offset(T addr, unsigned ofs)
-{return T(unsigned(addr)+ofs);}
-
-#endif //__HEAP_HYBRID_H__
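// Editor's worked example (not part of the original source): the alignment
// helpers above, shown for a 4 KB alignment:
//   Floor(0x12345678u, 0x1000)   == 0x12345000  // round down to the boundary
//   Ceiling(0x12345678u, 0x1000) == 0x12346000  // round up to the next boundary
//   LowBits(0x12345678u, 0x1000) == 0x678       // offset within the block
// page::PageFor() in slab_p.h applies the same mask-based rounding.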
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#ifndef __KERNEL_MODE__
-
-const int MAXSMALLPAGEBITS = 68<<3;
-#define MINPAGEPOWER PAGESHIFT+2
-
-struct paged_bitmap
-{
- public:
- inline paged_bitmap() : iBase(0), iNbits(0) {}
- void Init(unsigned char* p, unsigned size, unsigned bit);
-//
- inline unsigned char* Addr() const;
- inline unsigned Size() const;
-//
- inline void Set(unsigned ix, unsigned bit);
- inline unsigned operator[](unsigned ix) const;
- bool Is(unsigned ix, unsigned len, unsigned bit) const;
- void Set(unsigned ix, unsigned len, unsigned val);
- void Setn(unsigned ix, unsigned len, unsigned bit);
- unsigned Bits(unsigned ix, unsigned len) const; // little endian
- int Find(unsigned start, unsigned bit) const;
- private:
- unsigned char* iBase;
- unsigned iNbits;
-};
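// Editor's sketch (not part of the original source): intended use of
// paged_bitmap as suggested by the declarations above -- one bit per page,
// backed by an external buffer (RHybridHeap's iBitMapBuffer provides the
// initial storage). Return-value details are assumptions, not verified.
//
//   paged_bitmap pm;
//   unsigned char buf[MAXSMALLPAGEBITS >> 3];
//   pm.Init(buf, MAXSMALLPAGEBITS, 0); // all bits initially 0 (pages free)
//   pm.Setn(ix, npages, 1);            // mark a run of npages as allocated
//   int pos = pm.Find(0, 0);           // index of the first clear bit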
-
-#endif // __KERNEL_MODE__
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-** This file implements the generic atomics interface using ARMv6 assembly
-** instructions. It is more efficient than the inline versions when Qt is
-** built for the THUMB instruction set, as the required instructions are
-** only available in ARM state.
-****************************************************************************/
-
-#include <QtCore/qglobal.h>
-
-#ifdef QT_HAVE_ARMV6
-#ifndef SYMBIAN_E32_ATOMIC_API
-
-QT_BEGIN_NAMESPACE
-
-QT_USE_NAMESPACE
-
-#ifdef Q_CC_RVCT
-#pragma push
-#pragma arm
-Q_CORE_EXPORT asm
-bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
-{
- CODE32
- //R0 = _q_value
- //R1 = expectedValue
- //R2 = newValue
-retry_testAndSetOrdered
- LDREX r3,[r0] //r3 = *_q_value
- EORS r3,r3,r1 //if (r3 == expectedValue) {
- STREXEQ r3,r2,[r0] //*_q_value = newvalue, r3 = error
- TEQEQ r3,#1 //if error
- BEQ retry_testAndSetOrdered //then goto retry }
- RSBS r0,r3,#1 //return (r3 == 0)
- MOVCC r0,#0
- BX r14
-}
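// Editor's sketch (not part of the original source): the LDREX/STREX retry
// loop above is a load-linked/store-conditional compare-and-swap. For
// comparison only, its semantics in portable C++11:
//
//   #include <atomic>
//   bool testAndSetSketch(std::atomic<int> &value, int expected, int desired)
//   {
//       // true iff value held expected and was replaced by desired
//       return value.compare_exchange_strong(expected, desired);
//   }
//
// The Relaxed/Acquire/Release entry points above fall through to this ordered
// implementation, since the same ARMv6 instruction sequence serves all
// memory orderings here.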
-
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
-{
- CODE32
-    //R0 = _q_value
-    //R1 = newValue
-retry_fetchAndStoreOrdered
- LDREX r3,[r0] //r3 = *_q_value
- STREX r2,r1,[r0] //*_q_value = newValue, r2 = error
- TEQ r2,#0 //if error
- BNE retry_fetchAndStoreOrdered //then goto retry
- MOV r0,r3 //return r3
- BX r14
-}
-
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
-{
- CODE32
- //R0 = _q_value
- //R1 = valueToAdd
- STMDB sp!,{r12,lr}
-retry_fetchAndAddOrdered
- LDREX r2,[r0] //r2 = *_q_value
- ADD r3,r2,r1 //r3 = r2 + r1
- STREX r12,r3,[r0] //*_q_value = r3, r12 = error
- TEQ r12,#0 //if error
- BNE retry_fetchAndAddOrdered //then retry
- MOV r0,r2 //return r2
- LDMIA sp!,{r12,pc}
-}
-
-Q_CORE_EXPORT asm
-bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- CODE32
- //R0 = _q_value
- //R1 = expectedValue
- //R2 = newValue
-retryPointer_testAndSetOrdered
- LDREX r3,[r0] //r3 = *_q_value
- EORS r3,r3,r1 //if (r3 == expectedValue) {
- STREXEQ r3,r2,[r0] //*_q_value = newvalue, r3 = error
- TEQEQ r3,#1 //if error
- BEQ retryPointer_testAndSetOrdered //then goto retry }
- RSBS r0,r3,#1 //return (r3 == 0)
- MOVCC r0,#0
- BX r14
-}
-
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
-{
- CODE32
- //R0 = _q_value
- //R1 = newValue
-retryPointer_fetchAndStoreOrdered
- LDREX r3,[r0] //r3 = *_q_value
- STREX r2,r1,[r0] //*_q_value = newValue, r2 = error
- TEQ r2,#0 //if error
- BNE retryPointer_fetchAndStoreOrdered //then goto retry
- MOV r0,r3 //return r3
- BX r14
-}
-
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- CODE32
- //fall through
-}
-Q_CORE_EXPORT asm
-void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- CODE32
- //R0 = _q_value
- //R1 = valueToAdd
- STMDB sp!,{r12,lr}
-retryPointer_fetchAndAddOrdered
- LDREX r2,[r0] //r2 = *_q_value
- ADD r3,r2,r1 //r3 = r2 + r1
- STREX r12,r3,[r0] //*_q_value = r3, r12 = error
- TEQ r12,#0 //if error
- BNE retryPointer_fetchAndAddOrdered //then retry
- MOV r0,r2 //return r2
- LDMIA sp!,{r12,pc}
-}
-
-#pragma pop
-#elif defined (Q_CC_GCCE)
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
-{
- //R0 = _q_value
- //R1 = expectedValue
- //R2 = newValue
- asm("retry_testAndSetOrdered:");
- asm(" LDREX r3,[r0]"); //r3 = *_q_value
- asm(" EORS r3,r3,r1"); //if (r3 == expectedValue) {
- asm(" STREXEQ r3,r2,[r0]"); //*_q_value = newvalue, r3 = error
- asm(" TEQEQ r3,#1"); //if error
- asm(" BEQ retry_testAndSetOrdered"); //then goto retry }
- asm(" RSBS r0,r3,#1"); //return (r3 == 0)
- asm(" MOVCC r0,#0");
- asm(" BX r14");
-}
-
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
-{
-    //R0 = _q_value
-    //R1 = newValue
- asm("retry_fetchAndStoreOrdered:");
- asm(" LDREX r3,[r0]"); //r3 = *_q_value
- asm(" STREX r2,r1,[r0]"); //*_q_value = newValue, r2 = error
- asm(" TEQ r2,#0"); //if error
- asm(" BNE retry_fetchAndStoreOrdered"); //then goto retry
- asm(" MOV r0,r3"); //return r3
- asm(" BX r14");
-}
-
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
-{
- //R0 = _q_value
- //R1 = valueToAdd
- asm(" STMDB sp!,{r12,lr}");
- asm("retry_fetchAndAddOrdered:");
- asm(" LDREX r2,[r0]"); //r2 = *_q_value
- asm(" ADD r3,r2,r1 "); //r3 = r2 + r1
- asm(" STREX r12,r3,[r0]"); //*_q_value = r3, r12 = error
- asm(" TEQ r12,#0"); //if error
- asm(" BNE retry_fetchAndAddOrdered"); //then retry
- asm(" MOV r0,r2"); //return r2
- asm(" LDMIA sp!,{r12,pc}");
-}
-
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- //R0 = _q_value
- //R1 = expectedValue
- //R2 = newValue
- asm("retryPointer_testAndSetOrdered:");
- asm(" LDREX r3,[r0]"); //r3 = *_q_value
- asm(" EORS r3,r3,r1"); //if (r3 == expectedValue) {
- asm(" STREXEQ r3,r2,[r0]"); //*_q_value = newvalue, r3 = error
- asm(" TEQEQ r3,#1"); //if error
- asm(" BEQ retryPointer_testAndSetOrdered"); //then goto retry }
- asm(" RSBS r0,r3,#1"); //return (r3 == 0)
- asm(" MOVCC r0,#0");
- asm(" BX r14");
-}
-
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
-{
- //R0 = _q_value
- //R1 = newValue
- asm("retryPointer_fetchAndStoreOrdered:");
- asm(" LDREX r3,[r0]"); //r3 = *_q_value
- asm(" STREX r2,r1,[r0]"); //*_q_value = newValue, r2 = error
- asm(" TEQ r2,#0"); //if error
- asm(" BNE retryPointer_fetchAndStoreOrdered"); //then goto retry
- asm(" MOV r0,r3"); //return r3
- asm(" BX r14");
-}
-
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- //fall through
-}
-Q_CORE_EXPORT __declspec( naked )
-void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- //R0 = _q_value
- //R1 = valueToAdd
- asm(" STMDB sp!,{r12,lr}");
- asm("retryPointer_fetchAndAddOrdered:");
- asm(" LDREX r2,[r0]"); //r2 = *_q_value
- asm(" ADD r3,r2,r1"); //r3 = r2 + r1
- asm(" STREX r12,r3,[r0]"); //*_q_value = r3, r12 = error
- asm(" TEQ r12,#0"); //if error
- asm(" BNE retryPointer_fetchAndAddOrdered"); //then retry
- asm(" MOV r0,r2"); //return r2
- asm(" LDMIA sp!,{r12,pc}");
-}
-#else
-#error Unknown ARM compiler
-#endif
-QT_END_NAMESPACE
-#endif
-#endif
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#include <QtCore/qglobal.h>
-#include <QtCore/qatomic.h>
-
-#ifdef SYMBIAN_E32_ATOMIC_API
-#include <e32atomics.h>
-#endif
-
-#include <e32debug.h>
-
-QT_BEGIN_NAMESPACE
-
-// Heap and handle info printer.
-// Reports heap cells and handles that are no longer owned by anything that still exists,
-// which helps detect memory leaks, particularly if the numbers grow as the app is used.
-// This code is placed here because that makes it the very last static to be destroyed in
-// a Qt app, presumably because this file appears before any other file declaring static
-// data in the generated Symbian MMP file; it was chosen as the earliest Symbian-specific file.
-struct QSymbianPrintExitInfo
-{
- QSymbianPrintExitInfo()
- {
- RThread().HandleCount(initProcessHandleCount, initThreadHandleCount);
- initCells = User::CountAllocCells();
- }
- ~QSymbianPrintExitInfo()
- {
- RProcess myProc;
- TFullName fullName = myProc.FileName();
- TInt cells = User::CountAllocCells();
- TInt processHandleCount=0;
- TInt threadHandleCount=0;
- RThread().HandleCount(processHandleCount, threadHandleCount);
- RDebug::Print(_L("%S exiting with %d allocated cells, %d handles"),
- &fullName,
- cells - initCells,
- (processHandleCount + threadHandleCount) - (initProcessHandleCount + initThreadHandleCount));
- }
- TInt initCells;
- TInt initProcessHandleCount;
- TInt initThreadHandleCount;
-} symbian_printExitInfo;
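// Editor's note (not part of the original source): given the format string
// above, a leak-free run would print, e.g., "myapp.exe exiting with
// 0 allocated cells, 0 handles" (process name illustrative); counts that grow
// from run to run indicate leaked heap cells or handles.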
-
-Q_CORE_EXPORT bool QBasicAtomicInt::isReferenceCountingNative()
-{
-#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
- return true;
-#else
- return false;
-#endif
-}
-
-Q_CORE_EXPORT bool QBasicAtomicInt::isTestAndSetNative()
-{
-#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
- return true;
-#else
- return false;
-#endif
-}
-
-Q_CORE_EXPORT bool QBasicAtomicInt::isFetchAndStoreNative()
-{
-#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
- return true;
-#else
- return false;
-#endif
-}
-
-Q_CORE_EXPORT bool QBasicAtomicInt::isFetchAndAddNative()
-{
-#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
- return true;
-#else
- return false;
-#endif
-}
-
-Q_CORE_EXPORT bool QBasicAtomicPointer_isTestAndSetNative()
-{
-#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
- return true;
-#else
- return false;
-#endif
-}
-
-Q_CORE_EXPORT bool QBasicAtomicPointer_isFetchAndStoreNative()
-{
-#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
- return true;
-#else
- return false;
-#endif
-}
-
-Q_CORE_EXPORT bool QBasicAtomicPointer_isFetchAndAddNative()
-{
-#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
- return true;
-#else
- return false;
-#endif
-}
-
-#ifdef SYMBIAN_E32_ATOMIC_API
-//Symbian's API is SMP-safe when using an SMP kernel, and cheap when using a uniprocessor kernel
-
-//generate compiler error if casting assumptions are wrong (symbian64?)
-__ASSERT_COMPILE(sizeof(int) == sizeof(TUint32));
-__ASSERT_COMPILE(sizeof(void *) == sizeof(TUint32));
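// Editor's note (not part of the original source): in C++11 and later the same
// checks would read:
//   static_assert(sizeof(int) == sizeof(TUint32), "int must be 32 bits");
//   static_assert(sizeof(void *) == sizeof(TUint32), "pointers must be 32 bits");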
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_ord32(_q_value,
- reinterpret_cast<TUint32*>(&expectedValue), newValue));
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_rlx32(_q_value,
- reinterpret_cast<TUint32*>(&expectedValue), newValue));
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_acq32(_q_value,
- reinterpret_cast<TUint32*>(&expectedValue), newValue));
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_rel32(_q_value,
- reinterpret_cast<TUint32*>(&expectedValue), newValue));
-}
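// Editor's sketch (not part of the original source): __e32_atomic_cas_*32
// takes the expected value by pointer and, per the Symbian API, writes the
// observed value back on failure; the wrappers above discard that write-back
// by passing the address of a by-value parameter. The matching contract in
// standard C++:
//
//   #include <atomic>
//   bool casSketch(std::atomic<int> &v, int expected, int desired)
//   {
//       // compare_exchange_strong likewise updates 'expected' on failure
//       return v.compare_exchange_strong(expected, desired);
//   }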
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
-{
- return static_cast<int>(__e32_atomic_swp_ord32(_q_value, newValue));
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
-{
- return static_cast<int>(__e32_atomic_swp_rlx32(_q_value, newValue));
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
-{
- return static_cast<int>(__e32_atomic_swp_acq32(_q_value, newValue));
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
-{
- return static_cast<int>(__e32_atomic_swp_rel32(_q_value, newValue));
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
-{
- return static_cast<int>(__e32_atomic_add_ord32(_q_value, valueToAdd));
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
-{
- return static_cast<int>(__e32_atomic_add_rlx32(_q_value, valueToAdd));
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
-{
- return static_cast<int>(__e32_atomic_add_acq32(_q_value, valueToAdd));
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
-{
- return static_cast<int>(__e32_atomic_add_rel32(_q_value, valueToAdd));
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_ord_ptr(_q_value,
- &expectedValue,
- newValue));
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_rlx_ptr(_q_value,
- &expectedValue,
- newValue));
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_acq_ptr(_q_value,
- &expectedValue,
- newValue));
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- return static_cast<bool>(__e32_atomic_cas_rel_ptr(_q_value,
- &expectedValue,
- newValue));
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
-{
- return __e32_atomic_swp_ord_ptr(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
-{
- return __e32_atomic_swp_rlx_ptr(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
-{
- return __e32_atomic_swp_acq_ptr(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
-{
- return __e32_atomic_swp_rel_ptr(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- return __e32_atomic_add_ord_ptr(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- return __e32_atomic_add_rlx_ptr(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- return __e32_atomic_add_acq_ptr(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- return __e32_atomic_add_rel_ptr(_q_value, valueToAdd);
-}
-
-#else
-//Symbian kernels 9.4 and earlier don't expose a suitable API
-
-//For ARMv6, the generic atomics are implemented in assembly (qatomic_generic_armv6.cpp)
-#ifndef QT_HAVE_ARMV6
-
-class QCriticalSection
-{
-public:
- QCriticalSection() { fastlock.CreateLocal(); }
- ~QCriticalSection() { fastlock.Close(); }
- void lock() { fastlock.Wait(); }
- void unlock() { fastlock.Signal(); }
-
-private:
- RFastLock fastlock;
-};
-
-QCriticalSection qAtomicCriticalSection;
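// Editor's note (not part of the original source): a single process-wide
// RFastLock serializes every atomic operation on this fallback path. That is
// acceptable here because this code only runs on uniprocessor, pre-ARMv6
// targets, where correctness matters more than lock granularity.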
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
-{
- bool returnValue = false;
- qAtomicCriticalSection.lock();
- if (*_q_value == expectedValue) {
- *_q_value = newValue;
- returnValue = true;
- }
- qAtomicCriticalSection.unlock();
- return returnValue;
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
-{
- int returnValue;
- qAtomicCriticalSection.lock();
- returnValue = *_q_value;
- *_q_value = newValue;
- qAtomicCriticalSection.unlock();
- return returnValue;
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
-{
- int returnValue;
- qAtomicCriticalSection.lock();
- returnValue = *_q_value;
- *_q_value += valueToAdd;
- qAtomicCriticalSection.unlock();
- return returnValue;
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- bool returnValue = false;
- qAtomicCriticalSection.lock();
- if (*_q_value == expectedValue) {
- *_q_value = newValue;
- returnValue = true;
- }
- qAtomicCriticalSection.unlock();
- return returnValue;
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
-{
- void *returnValue;
- qAtomicCriticalSection.lock();
- returnValue = *_q_value;
- *_q_value = newValue;
- qAtomicCriticalSection.unlock();
- return returnValue;
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- void *returnValue;
- qAtomicCriticalSection.lock();
- returnValue = *_q_value;
- *_q_value = reinterpret_cast<char *>(returnValue) + valueToAdd;
- qAtomicCriticalSection.unlock();
- return returnValue;
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
-{
- return QBasicAtomicInt_testAndSetOrdered(_q_value, expectedValue, newValue);
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
-{
- return QBasicAtomicInt_testAndSetOrdered(_q_value, expectedValue, newValue);
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
-{
- return QBasicAtomicInt_testAndSetOrdered(_q_value, expectedValue, newValue);
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
-{
- return QBasicAtomicInt_fetchAndStoreOrdered(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
-{
- return QBasicAtomicInt_fetchAndStoreOrdered(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
-{
- return QBasicAtomicInt_fetchAndStoreOrdered(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
-{
- return QBasicAtomicInt_fetchAndAddOrdered(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
-{
- return QBasicAtomicInt_fetchAndAddOrdered(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
-{
- return QBasicAtomicInt_fetchAndAddOrdered(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- return QBasicAtomicPointer_testAndSetOrdered(_q_value, expectedValue, newValue);
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- return QBasicAtomicPointer_testAndSetOrdered(_q_value, expectedValue, newValue);
-}
-
-Q_CORE_EXPORT
-bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
- void *expectedValue,
- void *newValue)
-{
- return QBasicAtomicPointer_testAndSetOrdered(_q_value, expectedValue, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
-{
- return QBasicAtomicPointer_fetchAndStoreOrdered(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
-{
- return QBasicAtomicPointer_fetchAndStoreOrdered(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
-{
- return QBasicAtomicPointer_fetchAndStoreOrdered(_q_value, newValue);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- return QBasicAtomicPointer_fetchAndAddOrdered(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- return QBasicAtomicPointer_fetchAndAddOrdered(_q_value, valueToAdd);
-}
-
-Q_CORE_EXPORT
-void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
-{
- return QBasicAtomicPointer_fetchAndAddOrdered(_q_value, valueToAdd);
-}
-
-#endif // QT_HAVE_ARMV6
-#endif // SYMBIAN_E32_ATOMIC_API
-
-QT_END_NAMESPACE
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#include "qt_hybridheap_symbian_p.h"
-
-#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
-
-extern const TInt KHeapShrinkHysRatio = 0x800;
-
-/*
- * \internal
- * Called from the qtmain.lib application wrapper.
- * Create a new heap as requested, but use the new allocator
- */
-Q_CORE_EXPORT TInt qt_symbian_SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo)
-{
- TInt r = KErrNone;
- if (!aInfo.iAllocator && aInfo.iHeapInitialSize>0)
- {
- // new heap required
- RHeap* pH = NULL;
- r = UserHeap::CreateThreadHeap(aInfo, pH);
- }
- else if (aInfo.iAllocator)
- {
- // sharing a heap
- RAllocator* pA = aInfo.iAllocator;
- pA->Open();
- User::SwitchAllocator(pA);
- }
- return r;
-}
-
-#ifndef NO_NAMED_LOCAL_CHUNKS
-void TChunkCreateInfo::SetThreadHeap(TInt aInitialSize, TInt aMaxSize, const TDesC& aName)
-{
- iType = TChunkCreate::ENormal | TChunkCreate::EData;
- iMaxSize = aMaxSize;
- iInitialBottom = 0;
- iInitialTop = aInitialSize;
- iAttributes |= TChunkCreate::ELocalNamed;
- iName = &aName;
- iOwnerType = EOwnerThread;
-}
-#endif // NO_NAMED_LOCAL_CHUNKS
-
-void Panic(TCdtPanic reason)
-{
- _LIT(KCat, "QtHybridHeap");
- User::Panic(KCat, reason);
-}
-
-#else /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
-
-#include <e32std.h>
-
-/*
- * \internal
- * Called from the qtmain.lib application wrapper.
- * Create a new heap as requested, using the default system allocator
- */
-Q_CORE_EXPORT TInt qt_symbian_SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo)
-{
- return UserHeap::SetupThreadHeap(aNotFirst, aInfo);
-}
-
-#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#ifndef QT_HYBRIDHEAP_SYMBIAN_H
-#define QT_HYBRIDHEAP_SYMBIAN_H
-
-#include <qglobal.h>
-#include <e32cmn.h>
-
-#if !defined(__SYMBIAN_KERNEL_HYBRID_HEAP__) && !defined(__WINS__)
-//Enable the (backported) new allocator. When it is available in the OS,
-//this flag should be disabled from that OS version onward
-#define QT_USE_NEW_SYMBIAN_ALLOCATOR
-#endif
-
-#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
-
-#ifdef Q_CC_RVCT
-#pragma push
-#pragma arm
-#pragma Otime
-#pragma O2
-#endif
-
-#include "common_p.h"
-#ifdef QT_SYMBIAN_HAVE_U32STD_H
-#include <u32std.h>
-#endif
-#ifdef QT_SYMBIAN_HAVE_E32BTRACE_H
-#include <e32btrace.h>
-// enables BTrace code to be compiled in
-#define ENABLE_BTRACE
-#endif
-#ifdef __KERNEL_MODE__
-#include <kernel/kern_priv.h>
-#endif
-#include "dla_p.h"
-#ifndef __KERNEL_MODE__
-#include "slab_p.h"
-#include "page_alloc_p.h"
-#endif
-#include "heap_hybrid_p.h"
-
-// disabling Symbian import/export macros to prevent heap_hybrid.cpp, copied from Symbian^4, from exporting symbols in arm builds
-// this minimises the code changes to heap_hybrid.cpp to ease future integration
-#undef UEXPORT_C
-#define UEXPORT_C
-#undef EXPORT_C
-#define EXPORT_C
-#undef IMPORT_D
-#define IMPORT_D
-
-// disabling code ported from Symbian^4 that we don't want/can't have in earlier platforms
-#define QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
-
-#if defined(SYMBIAN_VERSION_9_1) || defined(SYMBIAN_VERSION_9_2) || defined(SYMBIAN_VERSION_9_3) || defined(SYMBIAN_VERSION_9_4) || defined(SYMBIAN_VERSION_SYMBIAN2)
-#define NO_NAMED_LOCAL_CHUNKS
-#endif
-
-// disabling the BTrace components of heap checking macros
-#ifndef ENABLE_BTRACE
-inline int noBTrace() {return 0;}
-#define BTraceContext12(a,b,c,d,e) noBTrace()
-#endif
-
-// declare ETHeapBadDebugFailParameter, where missing
-#define ETHeapBadDebugFailParameter ((TCdtPanic)213)
-
-#ifndef QT_SYMBIAN_HAVE_U32STD_H
-struct SThreadCreateInfo
- {
- TAny* iHandle;
- TInt iType;
- TThreadFunction iFunction;
- TAny* iPtr;
- TAny* iSupervisorStack;
- TInt iSupervisorStackSize;
- TAny* iUserStack;
- TInt iUserStackSize;
- TInt iInitialThreadPriority;
- TPtrC iName;
- TInt iTotalSize; // Size including any extras (must be a multiple of 8 bytes)
- };
-
-struct SStdEpocThreadCreateInfo : public SThreadCreateInfo
- {
- RAllocator* iAllocator;
- TInt iHeapInitialSize;
- TInt iHeapMaxSize;
- TInt iPadding; // Make structure size a multiple of 8 bytes
- };
-
-class TChunkCreate
- {
-public:
- // Attributes for chunk creation that are used by both euser and the kernel
- // by classes TChunkCreateInfo and SChunkCreateInfo, respectively.
- enum TChunkCreateAtt
- {
- ENormal = 0x00000000,
- EDoubleEnded = 0x00000001,
- EDisconnected = 0x00000002,
- ECache = 0x00000003,
- EMappingMask = 0x0000000f,
- ELocal = 0x00000000,
- EGlobal = 0x00000010,
- EData = 0x00000000,
- ECode = 0x00000020,
- EMemoryNotOwned = 0x00000040,
-
- // Force local chunk to be named. Only required for thread heap
- // chunks, all other local chunks should be nameless.
-		ELocalNamed		 = 0x00000080,
-
- // Make global chunk read only to all processes but the controlling owner
-		EReadOnly = 0x00000100,
-
- // Paging attributes for chunks.
- EPagingUnspec = 0x00000000,
- EPaged = 0x80000000,
- EUnpaged = 0x40000000,
- EPagingMask = EPaged | EUnpaged,
-
- EChunkCreateAttMask = EMappingMask | EGlobal | ECode |
- ELocalNamed | EReadOnly | EPagingMask,
- };
-public:
- TUint iAtt;
- TBool iForceFixed;
- TInt iInitialBottom;
- TInt iInitialTop;
- TInt iMaxSize;
- TUint8 iClearByte;
- };
-
-#endif // QT_SYMBIAN_HAVE_U32STD_H
-
-#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
-
-#endif /* QT_HYBRIDHEAP_SYMBIAN_H */
+++ /dev/null
-/****************************************************************************
-**
-** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
-** Contact: Nokia Corporation (qt-info@nokia.com)
-**
-** This file is part of the QtCore module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** GNU Lesser General Public License Usage
-** This file may be used under the terms of the GNU Lesser General Public
-** License version 2.1 as published by the Free Software Foundation and
-** appearing in the file LICENSE.LGPL included in the packaging of this
-** file. Please review the following information to ensure the GNU Lesser
-** General Public License version 2.1 requirements will be met:
-** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
-**
-** In addition, as a special exception, Nokia gives you certain additional
-** rights. These rights are described in the Nokia Qt LGPL Exception
-** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU General
-** Public License version 3.0 as published by the Free Software Foundation
-** and appearing in the file LICENSE.GPL included in the packaging of this
-** file. Please review the following information to ensure the GNU General
-** Public License version 3.0 requirements will be met:
-** http://www.gnu.org/copyleft/gpl.html.
-**
-** Other Usage
-** Alternatively, this file may be used in accordance with the terms and
-** conditions contained in a signed written agreement between you and Nokia.
-**
-**
-**
-**
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
-
-#ifndef __KERNEL_MODE__
-
-class slab;
-class slabhdr;
-#define MAXSLABSIZE 56
-#define PAGESHIFT 12
-#define PAGESIZE (1<<PAGESHIFT)
-#define SLABSHIFT 10
-#define SLABSIZE (1 << SLABSHIFT)
-#define CELLALIGN 8
-
-
-const unsigned slabfull = 0;
-const TInt slabsperpage = (int)(PAGESIZE/SLABSIZE);
-#define HIBIT(bits) (((unsigned)bits & 0xc) ? 2 + ((unsigned)bits>>3) : ((unsigned) bits>>1))
-
-#define LOWBIT(bits) (((unsigned) bits&3) ? 1 - ((unsigned)bits&1) : 3 - (((unsigned)bits>>2)&1))
-
-#define ZEROBITS(header) (((unsigned)header & 0x70000000) ? 0 : 1)
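// Editor's worked example (not in the original source): HIBIT and LOWBIT give
// the most- and least-significant set-bit positions of a 4-bit value, e.g. for
// the pagemap nibble of a slab header. For bits = 0x6 (0110b):
//   HIBIT(0x6)  == 2   // 0x6 & 0xc != 0, so 2 + (0x6 >> 3) = 2
//   LOWBIT(0x6) == 1   // 0x6 & 3 == 2, so 1 - (0x6 & 1) = 1
// ZEROBITS(h) yields 1 when the "zero" field (bits 30..28) of a header is clear.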
-
-class slabhdr
-{
- public:
- unsigned iHeader;
- // made up of
- // bits | 31 | 30..28 | 27..18 | 17..12 | 11..8 | 7..0 |
- // +----------+--------+--------+--------+---------+----------+
- // field | floating | zero | used-4 | size | pagemap | free pos |
- //
- slab** iParent; // reference to iParent's pointer to this slab in tree
- slab* iChild1; // 1st iChild in tree
- slab* iChild2; // 2nd iChild in tree
-};
-
-const TInt KMaxSlabPayload = SLABSIZE - sizeof(slabhdr);
-#define MAXUSEDM4BITS 0x0fc00000
-#define FLOATING_BIT 0x80000000
-
-inline unsigned HeaderFloating(unsigned h)
-{return (h&0x80000000);}
-const unsigned maxuse = (SLABSIZE - sizeof(slabhdr))>>2;
-const unsigned firstpos = sizeof(slabhdr)>>2;
-
-#ifdef _DEBUG
-#define CHECKTREE(x) DoCheckSlabTree(x,EFalse)
-#define CHECKSLAB(s,t,p) DoCheckSlab(s,t,p)
-#define CHECKSLABBFR(s,p) {TUint32 b[4]; BuildPartialSlabBitmap(b,s,p);}
-#else
-#define CHECKTREE(x) (void)0
-#define CHECKSLAB(s,t,p) (void)0
-#define CHECKSLABBFR(s,p) (void)0
-#endif
-
-class slabset
-{
- public:
- slab* iPartial;
-};
-
-class slab : public slabhdr
-{
- public:
- void Init(unsigned clz);
- //static slab* SlabFor( void* p);
-	static slab* SlabFor(const void* p);
- unsigned char iPayload[SLABSIZE-sizeof(slabhdr)];
-};
-
-class page
-{
- public:
- inline static page* PageFor(slab* s);
- //slab iSlabs;
- slab iSlabs[slabsperpage];
-};
-
-
-inline page* page::PageFor(slab* s)
-{
- return reinterpret_cast<page*>((unsigned(s))&~(PAGESIZE-1));
-}
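// Editor's note (not part of the original source): PageFor() masks a slab
// address down to the start of its 4 KB page, e.g. a slab at 0x00482C00 maps
// to the page at 0x00482000; this is the same Floor-style rounding as the
// alignment templates in heap_hybrid_p.h.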
-
-
-#endif // __KERNEL_MODE__