return (GrRenderTargetOpList*) this->getLastOpList();
}
+ static size_t ComputeSize(const GrSurfaceDesc& desc, int colorValuesPerPixel);
+
protected:
enum class Flags {
kNone = 0,
inline GrTexturePriv texturePriv();
inline const GrTexturePriv texturePriv() const;
+ static size_t ComputeSize(const GrSurfaceDesc&, bool hasMipMaps);
+
protected:
GrTexture(GrGpu*, const GrSurfaceDesc&, GrSLType, bool wasMipMapDataProvided);
* @return the amount of GPU memory used in bytes
*/
size_t gpuMemorySize() const {
- if (fTarget) {
- return fTarget->gpuMemorySize();
- }
-
if (kInvalidGpuMemorySize == fGpuMemorySize) {
fGpuMemorySize = this->onGpuMemorySize();
SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
const uint32_t fUniqueID; // set from the backing resource for wrapped resources
static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
+ SkDEBUGCODE(size_t getRawGpuMemorySize_debugOnly() const { return fGpuMemorySize; })
+
+private:
+ virtual size_t onGpuMemorySize() const = 0;
+
// This entry is lazily evaluated so, when the proxy wraps a resource, the resource
// will be called but, when the proxy is deferred, it will compute the answer itself.
// If the proxy computes its own answer that answer is checked (in debug mode) in
// the instantiation method.
mutable size_t fGpuMemorySize;
-private:
- virtual size_t onGpuMemorySize() const = 0;
-
// The last opList that wrote to or is currently going to write to this surface
// The opList can be closed (e.g., no render target context is currently bound
// to this renderTarget).
// and the opList of a destination surface to which this one is being drawn or copied.
GrOpList* fLastOpList;
+
typedef GrIORefProxy INHERITED;
};
INHERITED::onAbandon();
}
+size_t GrRenderTarget::ComputeSize(const GrSurfaceDesc& desc, int colorValuesPerPixel) {
+ SkASSERT(kUnknown_GrPixelConfig != desc.fConfig);
+ SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
+ size_t colorBytes = GrBytesPerPixel(desc.fConfig);
+ SkASSERT(colorBytes > 0);
+
+ size_t rtSize = colorValuesPerPixel * desc.fWidth * desc.fHeight * colorBytes;
+ SkASSERT(rtSize <= WorstCaseSize(desc));
+ return rtSize;
+}
+
///////////////////////////////////////////////////////////////////////////////
bool GrRenderTargetPriv::attachStencilAttachment(GrStencilAttachment* stencil) {
if (!stencil && !fRenderTarget->fStencilAttachment) {
// No need to do any work since we currently don't have a stencil attachment and
- // we're not acctually adding one.
+ // we're not actually adding one.
return true;
}
fRenderTarget->fStencilAttachment = stencil;
return (this->flags() & Flags::kWindowRectsSupport) ?
fRenderTarget->getGpu()->caps()->maxWindowRectangles() : 0;
}
+
}
#ifdef SK_DEBUG
- if (kInvalidGpuMemorySize != fGpuMemorySize) {
- SkASSERT(fTarget->gpuMemorySize() <= fGpuMemorySize);
+ if (kInvalidGpuMemorySize != this->getRawGpuMemorySize_debugOnly()) {
+ SkASSERT(fTarget->gpuMemorySize() <= this->getRawGpuMemorySize_debugOnly());
}
#endif
return fTarget->gpuMemorySize();
}
- SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
- SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
- size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
- SkASSERT(colorBytes > 0);
-
// TODO: do we have enough information to improve this worst case estimate?
- return (fDesc.fSampleCnt + 1) * fDesc.fWidth * fDesc.fHeight * colorBytes;
+ return GrRenderTarget::ComputeSize(fDesc, fDesc.fSampleCnt+1);
}
sk_sp<GrRenderTargetProxy> GrRenderTargetProxy::Make(const GrCaps& caps,
}
}
-size_t GrTexture::onGpuMemorySize() const {
+size_t GrTexture::ComputeSize(const GrSurfaceDesc& desc, bool hasMipMaps) {
size_t textureSize;
- if (GrPixelConfigIsCompressed(fDesc.fConfig)) {
- textureSize = GrCompressedFormatDataSize(fDesc.fConfig, fDesc.fWidth, fDesc.fHeight);
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ textureSize = GrCompressedFormatDataSize(desc.fConfig, desc.fWidth, desc.fHeight);
} else {
- textureSize = (size_t) fDesc.fWidth * fDesc.fHeight * GrBytesPerPixel(fDesc.fConfig);
+ textureSize = (size_t) desc.fWidth * desc.fHeight * GrBytesPerPixel(desc.fConfig);
}
- if (this->texturePriv().hasMipMaps()) {
+ if (hasMipMaps) {
// We don't have to worry about the mipmaps being a different size than
// we'd expect because we never change fDesc.fWidth/fHeight.
textureSize += textureSize/3;
}
- SkASSERT(!SkToBool(fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
- SkASSERT(textureSize <= WorstCaseSize(fDesc));
+ SkASSERT(!SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag));
+ SkASSERT(textureSize <= WorstCaseSize(desc));
return textureSize;
}
+size_t GrTexture::onGpuMemorySize() const {
+ return ComputeSize(fDesc, this->texturePriv().hasMipMaps());
+}
+
void GrTexture::validateDesc() const {
if (this->asRenderTarget()) {
// This texture has a render target
}
#ifdef SK_DEBUG
- if (kInvalidGpuMemorySize != fGpuMemorySize) {
- SkASSERT(fTarget->gpuMemorySize() <= fGpuMemorySize);
+ if (kInvalidGpuMemorySize != this->getRawGpuMemorySize_debugOnly()) {
+ SkASSERT(fTarget->gpuMemorySize() <= this->getRawGpuMemorySize_debugOnly());
}
#endif
}
// Estimates the proxy's GPU memory use. An instantiated proxy defers to the
// backing resource; a deferred proxy computes a conservative estimate itself.
size_t GrTextureProxy::onGpuMemorySize() const {
    if (fTarget) {
        // Instantiated: the actual resource knows its true size.
        return fTarget->gpuMemorySize();
    }

    // TODO: add tracking of mipmap state to improve the estimate. Until then,
    // assume the worst case (a full mip chain) for deferred proxies.
    static const bool kHasMipMaps = true;
    return GrTexture::ComputeSize(fDesc, kHasMipMaps);
}
sk_sp<GrTextureProxy> GrTextureProxy::Make(GrTextureProvider* texProvider,
fViewport.fWidth = desc.fWidth;
fViewport.fHeight = desc.fHeight;
- fGpuMemorySize = this->totalSamples() * this->totalBytesPerSample();
-
- SkASSERT(fGpuMemorySize <= WorstCaseSize(desc));
+ fNumSamplesOwnedPerPixel = this->totalSamples();
}
sk_sp<GrGLRenderTarget> GrGLRenderTarget::MakeWrapped(GrGLGpu* gpu,
}
// Reports this render target's GPU memory use. fNumSamplesOwnedPerPixel is
// cached at creation time, so the answer stays correct even after abandon or
// release have zeroed out the GL object IDs.
size_t GrGLRenderTarget::onGpuMemorySize() const {
    return GrRenderTarget::ComputeSize(fDesc, fNumSamplesOwnedPerPixel);
}
bool GrGLRenderTarget::completeStencilAttachment() {
// Log any renderbuffer's contribution to memory. We only do this if we own the renderbuffer
// (have a fMSColorRenderbufferID).
if (fMSColorRenderbufferID) {
- size_t size = this->msaaSamples() * this->totalBytesPerSample();
+ size_t size = GrRenderTarget::ComputeSize(fDesc, this->msaaSamples());
// Due to this resource having both a texture and a renderbuffer component, dump as
// skia/gpu_resources/resource_#/renderbuffer
}
}
-size_t GrGLRenderTarget::totalBytesPerSample() const {
- SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
- SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
- size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
- SkASSERT(colorBytes > 0);
-
- return fDesc.fWidth * fDesc.fHeight * colorBytes;
-}
-
int GrGLRenderTarget::msaaSamples() const {
if (fTexFBOID == kUnresolvableFBOID || fTexFBOID != fRTFBOID) {
// If the render target's FBO is external (fTexFBOID == kUnresolvableFBOID), or if we own
GrGLGpu* getGLGpu() const;
bool completeStencilAttachment() override;
- // The total size of the resource (including all pixels) for a single sample.
- size_t totalBytesPerSample() const;
int msaaSamples() const;
// The number total number of samples, including both MSAA and resolve texture samples.
int totalSamples() const;
// we want the rendering to be at top left (GL has origin in bottom left)
GrGLIRect fViewport;
- // onGpuMemorySize() needs to know the VRAM footprint of the FBO(s). However, abandon and
- // release zero out the IDs and the cache needs to know the size even after those actions.
- size_t fGpuMemorySize;
+ // The RenderTarget needs to be able to report its VRAM footprint even after abandon and
+ // release have potentially zeroed out the IDs (e.g., so the cache can reset itself). Since
+ // the IDs are just required for the computation in totalSamples we cache that result here.
+ int fNumSamplesOwnedPerPixel;
typedef GrRenderTarget INHERITED;
};
, fFramebuffer(nullptr)
, fCachedSimpleRenderPass(nullptr) {
SkASSERT(desc.fSampleCnt);
- // The plus 1 is to account for the resolve texture.
- fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: this still correct?
this->createFramebuffer(gpu);
this->registerWithCache(budgeted);
}
, fFramebuffer(nullptr)
, fCachedSimpleRenderPass(nullptr) {
SkASSERT(desc.fSampleCnt);
- // The plus 1 is to account for the resolve texture.
- fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: this still correct?
this->createFramebuffer(gpu);
}
, fFramebuffer(nullptr)
, fCachedSimpleRenderPass(nullptr) {
SkASSERT(!desc.fSampleCnt);
- fColorValuesPerPixel = 1;
this->createFramebuffer(gpu);
this->registerWithCache(budgeted);
}
, fFramebuffer(nullptr)
, fCachedSimpleRenderPass(nullptr) {
SkASSERT(!desc.fSampleCnt);
- fColorValuesPerPixel = 1;
this->createFramebuffer(gpu);
}
// This accounts for the texture's memory and any MSAA renderbuffer's memory.
size_t onGpuMemorySize() const override {
- SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
- SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
- size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
- SkASSERT(colorBytes > 0);
- return fColorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * colorBytes;
+ // The plus 1 is to account for the resolve texture.
+ return GrRenderTarget::ComputeSize(fDesc, fDesc.fSampleCnt+1); // TODO: this still correct?
}
void createFramebuffer(GrVkGpu* gpu);
void abandonInternalObjects();
const GrVkFramebuffer* fFramebuffer;
- int fColorValuesPerPixel;
// This is a cached pointer to a simple render pass. The render target should unref it
// once it is done with it.