1 // Copyright 2016 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "cc/tiles/gpu_image_decode_cache.h"
13 #include "base/auto_reset.h"
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/containers/span.h"
17 #include "base/debug/alias.h"
18 #include "base/feature_list.h"
19 #include "base/hash/hash.h"
20 #include "base/logging.h"
21 #include "base/memory/discardable_memory_allocator.h"
22 #include "base/memory/raw_ptr.h"
23 #include "base/metrics/histogram_macros.h"
24 #include "base/notreached.h"
25 #include "base/numerics/safe_math.h"
26 #include "base/strings/stringprintf.h"
27 #include "base/threading/thread_task_runner_handle.h"
28 #include "base/trace_event/memory_dump_manager.h"
29 #include "cc/base/devtools_instrumentation.h"
30 #include "cc/base/features.h"
31 #include "cc/base/histograms.h"
32 #include "cc/base/switches.h"
33 #include "cc/paint/paint_flags.h"
34 #include "cc/raster/scoped_grcontext_access.h"
35 #include "cc/raster/tile_task.h"
36 #include "cc/tiles/mipmap_util.h"
37 #include "cc/tiles/raster_dark_mode_filter.h"
38 #include "components/viz/common/gpu/raster_context_provider.h"
39 #include "gpu/command_buffer/client/context_support.h"
40 #include "gpu/command_buffer/client/gles2_interface.h"
41 #include "gpu/command_buffer/client/raster_interface.h"
42 #include "gpu/command_buffer/common/sync_token.h"
43 #include "gpu/config/gpu_finch_features.h"
44 #include "gpu/config/gpu_info.h"
45 #include "third_party/skia/include/core/SkBitmap.h"
46 #include "third_party/skia/include/core/SkCanvas.h"
47 #include "third_party/skia/include/core/SkColorFilter.h"
48 #include "third_party/skia/include/core/SkColorSpace.h"
49 #include "third_party/skia/include/core/SkData.h"
50 #include "third_party/skia/include/core/SkImageInfo.h"
51 #include "third_party/skia/include/core/SkPixmap.h"
52 #include "third_party/skia/include/core/SkRect.h"
53 #include "third_party/skia/include/core/SkSamplingOptions.h"
54 #include "third_party/skia/include/core/SkSize.h"
55 #include "third_party/skia/include/core/SkSurface.h"
56 #include "third_party/skia/include/core/SkYUVAPixmaps.h"
57 #include "third_party/skia/include/gpu/GrBackendSurface.h"
58 #include "third_party/skia/include/gpu/GrDirectContext.h"
59 #include "third_party/skia/include/gpu/GrYUVABackendTextures.h"
60 #include "ui/gfx/color_space.h"
61 #include "ui/gfx/geometry/size.h"
62 #include "ui/gfx/geometry/skia_conversions.h"
63 #include "ui/gl/trace_util.h"
// Tuning knobs for the GPU image decode cache (entry-count limits; byte
// limits, if any, are defined outside this visible chunk).
67 // The number of entries to keep in the cache, depending on the memory state of
68 // the system. This limit can be breached by in-use cache items, which cannot
70 static const int kNormalMaxItemsInCacheForGpu = 2000;
// When the cache is suspended, keep no cached (non-in-use) entries at all.
71 static const int kSuspendedMaxItemsInCacheForGpu = 0;
73 // The maximum number of images that we can lock simultaneously in our working
74 // set. This is separate from the memory limit, as keeping very large numbers
75 // of small images simultaneously locked can lead to performance issues and
77 static const int kMaxItemsInWorkingSet = 256;
// Buckets recorded into the "Renderer4.GpuImageDecodeState" /
// "Renderer4.GpuImageUploadState" UMA histograms (see ReportUsageStats()
// below). Derived from UsageStats as follows:
79 // lock_count    │ used  │ result state
80 // ═══════════╪═══════╪══════════════════
81 // 1          │ false │ WASTED_ONCE
82 // 1          │ true  │ USED_ONCE
83 // >1         │ false │ WASTED_RELOCKED
84 // >1         │ true  │ USED_RELOCKED
85 // Note that it's important not to reorder the following enum, since the
86 // numerical values are used in the histogram code.
87 enum ImageUsageState : int {
88   IMAGE_USAGE_STATE_WASTED_ONCE,
89   IMAGE_USAGE_STATE_USED_ONCE,
90   IMAGE_USAGE_STATE_WASTED_RELOCKED,
91   IMAGE_USAGE_STATE_USED_RELOCKED,
// Count bucket; must stay last.
92   IMAGE_USAGE_STATE_COUNT
95 // Returns true if an image would not be drawn and should therefore be
96 // skipped rather than decoded.
97 bool SkipImage(const DrawImage& draw_image) {
// Skip if the source rect lies entirely outside the image bounds.
// NOTE(review): the body of this `if` (presumably `return true;`) and the
// function's closing lines are elided from this chunk.
98   if (!SkIRect::Intersects(
99           draw_image.src_rect(),
100           SkIRect::MakeWH(draw_image.paint_image().width(),
101                           draw_image.paint_image().height()))) {
// Skip if either scale dimension is effectively zero (nothing would be
// rasterized). Body elided from this chunk.
104   if (std::abs(draw_image.scale().width()) <
105           std::numeric_limits<float>::epsilon() ||
106       std::abs(draw_image.scale().height()) <
107           std::numeric_limits<float>::epsilon()) {
113 // Returns the filter quality to use for scaling the image to upload scale as
114 // well as for using when passing the decoded image to skia. Due to parity with
115 // SW and power implication, limit the filter quality to medium.
116 PaintFlags::FilterQuality CalculateDesiredFilterQuality(
117     const DrawImage& draw_image) {
// Clamp the requested quality to kMedium (never upgrade, only cap).
118   return std::min(PaintFlags::FilterQuality::kMedium,
119                   draw_image.filter_quality());
122 // Calculates the scale factor which can be used to scale an image to a given
// mip level.
124 SkSize CalculateScaleFactorForMipLevel(const DrawImage& draw_image,
125                                        int upload_scale_mip_level) {
// The mip chain is anchored at the paint image's full (unscaled) dimensions.
126   gfx::Size base_size(draw_image.paint_image().width(),
127                       draw_image.paint_image().height());
128   return MipMapUtil::GetScaleAdjustmentForLevel(base_size,
129                                                 upload_scale_mip_level);
132 // Calculates the size of a given mip level.
133 gfx::Size CalculateSizeForMipLevel(const DrawImage& draw_image,
134                                    int upload_scale_mip_level) {
// As above, mip sizes are derived from the full paint-image dimensions.
135   gfx::Size base_size(draw_image.paint_image().width(),
136                       draw_image.paint_image().height());
137   return MipMapUtil::GetSizeForLevel(base_size, upload_scale_mip_level);
140 // Determines whether a draw image requires mips.
141 bool ShouldGenerateMips(const DrawImage& draw_image,
142                         int upload_scale_mip_level) {
143   // If filter quality is less than medium, don't generate mips.
// NOTE(review): the `return false;` for this early-out is elided from this
// chunk.
144   if (draw_image.filter_quality() < PaintFlags::FilterQuality::kMedium)
147   gfx::Size base_size(draw_image.paint_image().width(),
148                       draw_image.paint_image().height());
149   // Take the abs of the scale, as mipmap functions don't handle (and aren't
150   // impacted by) negative image dimensions.
151   gfx::SizeF scaled_size = gfx::ScaleSize(
152       gfx::SizeF(base_size), std::abs(draw_image.scale().width()),
153       std::abs(draw_image.scale().height()));
155   // If our target size is smaller than our scaled size in both dimension, we
156   // need to generate mips.
157   gfx::SizeF target_size =
158       gfx::SizeF(CalculateSizeForMipLevel(draw_image, upload_scale_mip_level));
// Body of this `if` (presumably `return true;`) and the final return are
// elided from this chunk.
159   if (scaled_size.width() < target_size.width() &&
160       scaled_size.height() < target_size.height()) {
167 // Estimates the byte size of the decoded data for an image that goes through
168 // hardware decode acceleration. The actual byte size is only known once the
169 // image is decoded in the service side because different drivers have different
170 // pixel format and alignment requirements.
171 size_t EstimateHardwareDecodedDataSize(
172     const ImageHeaderMetadata* image_metadata) {
// Prefer the coded size (post-decoder alignment) when available; fall back to
// the nominal image size.
173   gfx::Size dimensions = image_metadata->coded_size
174                              ? *(image_metadata->coded_size)
175                              : image_metadata->image_size;
// Y plane: one byte per pixel, computed with overflow checking.
176   base::CheckedNumeric<size_t> y_data_size(dimensions.width());
177   y_data_size *= dimensions.height();
// NOTE(review): the `static_assert(` opener for the condition below is elided
// from this chunk.
180       // TODO(andrescj): refactor to instead have a static_assert at the
181       // declaration site of gpu::ImageDecodeAcceleratorSubsampling to make sure
182       // it has the same number of entries as YUVSubsampling.
183       static_cast<int>(gpu::ImageDecodeAcceleratorSubsampling::kMaxValue) == 2,
184       "EstimateHardwareDecodedDataSize() must be adapted to support all "
185       "subsampling factors in ImageDecodeAcceleratorSubsampling");
// UV plane dimensions start at full size and are halved per the subsampling
// mode below (the per-case halving statements are elided from this chunk).
186   base::CheckedNumeric<size_t> uv_width(dimensions.width());
187   base::CheckedNumeric<size_t> uv_height(dimensions.height());
188   switch (image_metadata->yuv_subsampling) {
189     case YUVSubsampling::k420:
195     case YUVSubsampling::k422:
199     case YUVSubsampling::k444:
// Total = Y plane + two UV planes; ValueOrDie() crashes on overflow rather
// than returning a wrapped size.
205   base::CheckedNumeric<size_t> uv_data_size(uv_width * uv_height);
206   return (y_data_size + 2 * uv_data_size).ValueOrDie();
209 // Draws and scales the provided |draw_image| into the |target_pixmap|. If the
210 // draw/scale can be done directly, calls directly into PaintImage::Decode.
211 // If not, decodes to a compatible temporary pixmap and then converts that into
212 // the |target_pixmap|.
214 // For RGBX decoding, the default, the parameters |pixmap_y|,
215 // |pixmap_u|, and |pixmap_v| are NULL. Otherwise, the pixmaps share a
216 // contiguous block of allocated backing memory. If scaling needs to happen,
217 // it is done individually for each plane.
219 // The |do_yuv_decode| parameter indicates whether YUV decoding can and should
220 // be done, which is a combination of the underlying data requesting YUV and the
221 // cache mode (i.e. OOP-R or not) supporting it. The |yuva_data_type| field
222 // indicates the bit depth and type that should be used for Y, U, and V values.
// Decodes |draw_image| into |target_pixmap| (RGBX) or into the YUV plane
// pixmaps, scaling directly when the decoder supports it and otherwise
// decoding to a temporary bitmap and scaling per plane. Returns false on
// decode/allocation failure.
// NOTE(review): several guard lines (notably the `if (do_yuv_decode)`
// branches and some early returns) are elided from this chunk; the comments
// below describe only what the visible lines establish.
223 bool DrawAndScaleImage(
224     const DrawImage& draw_image,
225     SkPixmap* target_pixmap,
226     PaintImage::GeneratorClientId client_id,
227     const bool do_yuv_decode,
228     const SkYUVAPixmapInfo::SupportedDataTypes& yuva_supported_data_types,
229     const SkYUVAPixmapInfo::DataType yuva_data_type =
230         SkYUVAPixmapInfo::DataType::kUnorm8,
231     SkPixmap* pixmap_y = nullptr,
232     SkPixmap* pixmap_u = nullptr,
233     SkPixmap* pixmap_v = nullptr) {
234   // We will pass color_space explicitly to PaintImage::Decode, so pull it out
235   // of the pixmap and populate a stand-alone value.
236   // Note: To pull colorspace out of the pixmap, we create a new pixmap with
237   // null colorspace but the same memory pointer.
238   // The backing memory for |pixmap| has been allocated based on
239   // image_data->size, so it is correct for YUV even if the other parameters
240   // for |pixmap| do not quite make sense for YUV (e.g. rowBytes).
241   SkPixmap pixmap(target_pixmap->info().makeColorSpace(nullptr),
242                   target_pixmap->writable_addr(), target_pixmap->rowBytes());
243   uint8_t* data_ptr = reinterpret_cast<uint8_t*>(pixmap.writable_addr());
244   sk_sp<SkColorSpace> color_space = target_pixmap->info().refColorSpace();
246   const PaintImage& paint_image = draw_image.paint_image();
// An "original decode" targets the image's full native dimensions.
247   const bool is_original_decode =
248       SkISize::Make(paint_image.width(), paint_image.height()) ==
249       pixmap.bounds().size();
250   const bool is_nearest_neighbor =
251       draw_image.filter_quality() == PaintFlags::FilterQuality::kNone;
252   SkImageInfo info = pixmap.info();
253   SkYUVAPixmapInfo yuva_pixmap_info;
258   // If |do_yuv_decode| is true, IsYuv() must be true.
259   const bool yuva_info_initialized =
260       paint_image.IsYuv(yuva_supported_data_types, &yuva_pixmap_info);
261   DCHECK(yuva_info_initialized);
262   DCHECK_EQ(yuva_pixmap_info.dataType(), yuva_data_type);
263   // Only tri-planar YUV with no alpha is currently supported.
264   DCHECK_EQ(yuva_pixmap_info.numPlanes(), 3);
266   SkISize supported_size =
267       paint_image.GetSupportedDecodeSize(pixmap.bounds().size());
268   // We can directly decode into target pixmap if we are doing an original
269   // decode or we are decoding to scale without nearest neighbor filtering.
270   // TODO(crbug.com/927437): Although the JPEG decoder supports decoding to
271   // scale, we have not yet implemented YUV + decoding to scale, so we skip it.
272   const bool can_directly_decode =
273       is_original_decode || (!is_nearest_neighbor && !do_yuv_decode);
274   if (supported_size == pixmap.bounds().size() && can_directly_decode) {
// Direct path: decode straight into the caller's memory. For YUV, view the
// caller's contiguous buffer as three planes and hand those back via the
// out-params.
276       SkYUVAPixmaps yuva_pixmaps = SkYUVAPixmaps::FromExternalMemory(
277           yuva_pixmap_info, pixmap.writable_addr());
278       // Only tri-planar YUV with no alpha is currently supported.
279       DCHECK_EQ(yuva_pixmaps.numPlanes(), 3);
280       *pixmap_y = yuva_pixmaps.plane(0);
281       *pixmap_u = yuva_pixmaps.plane(1);
282       *pixmap_v = yuva_pixmaps.plane(2);
283       return paint_image.DecodeYuv(yuva_pixmaps, draw_image.frame_index(),
286     return paint_image.Decode(pixmap.writable_addr(), &info, color_space,
287                               draw_image.frame_index(), client_id);
290   // If we can't decode/scale directly, we will handle this in 2 steps.
291   // Step 1: Decode at the nearest (larger) directly supported size or the
292   // original size if nearest neighbor quality is requested.
293   // Step 2: Scale to |pixmap| size. If decoded image is half float backed and
294   // the device does not support image resize, decode to N32 color type and
295   // convert to F16 afterward. If doing YUV decoding, use an assumption of
296   // YUV420 and the dimensions of |pixmap|. Resizing happens on a plane-by-plane
298   SkImageInfo decode_info;
299   SkColorType yuva_color_type;
301     const size_t yuva_bytes = yuva_pixmap_info.computeTotalBytes();
// Bail out (body elided) if the total YUV allocation would overflow.
302     if (SkImageInfo::ByteSizeOverflowed(yuva_bytes)) {
305     // We temporarily abuse the dimensions of the pixmap to ensure we allocate
306     // the proper number of bytes, but the actual plane dimensions are stored in
307     // |yuva_pixmap_info| and accessed within PaintImage::DecodeYuv() and below.
308     yuva_color_type = SkYUVAPixmapInfo::DefaultColorTypeForDataType(
309         yuva_pixmap_info.dataType(), 1);
310     decode_info = info.makeColorType(yuva_color_type).makeWH(yuva_bytes, 1);
312     SkISize decode_size =
314             ? SkISize::Make(paint_image.width(), paint_image.height())
316     decode_info = info.makeWH(decode_size.width(), decode_size.height());
319   const PaintFlags::FilterQuality filter_quality =
320       CalculateDesiredFilterQuality(draw_image);
321   const SkSamplingOptions sampling(
322       PaintFlags::FilterQualityToSkSamplingOptions(filter_quality));
// Temporary CPU-side buffer for the intermediate (unscaled) decode.
324   SkBitmap decode_bitmap;
325   if (!decode_bitmap.tryAllocPixels(decode_info))
328   SkPixmap decode_pixmap = decode_bitmap.pixmap();
329   SkYUVAPixmaps unscaled_yuva_pixmaps;
331     unscaled_yuva_pixmaps = SkYUVAPixmaps::FromExternalMemory(
332         yuva_pixmap_info, decode_pixmap.writable_addr());
334   bool initial_decode_failed =
336           ? !paint_image.DecodeYuv(unscaled_yuva_pixmaps,
337                                    draw_image.frame_index(), client_id)
338           : !paint_image.Decode(decode_pixmap.writable_addr(), &decode_info,
339                                 color_space, draw_image.frame_index(),
341   if (initial_decode_failed)
345     const SkImageInfo y_info_scaled = info.makeColorType(yuva_color_type);
347     // Always promote scaled images to 4:4:4 to avoid blurriness. By using the
348     // same dimensions for the UV planes, we can avoid scaling them completely
349     // or at least avoid scaling the width.
351     // E.g., consider an original (100, 100) image scaled to mips level 1 (50%),
352     // the Y plane size will be (50, 50), but unscaled UV planes are already
353     // (50, 50) for 4:2:0, and (50, 100) for 4:2:2, so leaving them completely
354     // unscaled or only scaling the height for 4:2:2 has superior quality.
355     SkImageInfo u_info_scaled = y_info_scaled;
356     SkImageInfo v_info_scaled = y_info_scaled;
// Lay the three scaled planes out back-to-back in the caller's buffer.
358     const size_t y_plane_bytes = y_info_scaled.computeMinByteSize();
359     const size_t u_plane_bytes = u_info_scaled.computeMinByteSize();
360     DCHECK(!SkImageInfo::ByteSizeOverflowed(y_plane_bytes));
361     DCHECK(!SkImageInfo::ByteSizeOverflowed(u_plane_bytes));
363     pixmap_y->reset(y_info_scaled, data_ptr, y_info_scaled.minRowBytes());
364     pixmap_u->reset(u_info_scaled, data_ptr + y_plane_bytes,
365                     u_info_scaled.minRowBytes());
366     pixmap_v->reset(v_info_scaled, data_ptr + y_plane_bytes + u_plane_bytes,
367                     v_info_scaled.minRowBytes());
369     const bool all_planes_scaled_successfully =
370         unscaled_yuva_pixmaps.plane(0).scalePixels(*pixmap_y, sampling) &&
371         unscaled_yuva_pixmaps.plane(1).scalePixels(*pixmap_u, sampling) &&
372         unscaled_yuva_pixmaps.plane(2).scalePixels(*pixmap_v, sampling);
373     return all_planes_scaled_successfully;
// RGBX path: scale the intermediate decode into the caller's pixmap.
375   return decode_pixmap.scalePixels(pixmap, sampling);
378 // Takes ownership of the backing texture of an SkImage. This allows us to
379 // delete this texture under Skia (via discardable).
380 sk_sp<SkImage> TakeOwnershipOfSkImageBacking(GrDirectContext* context,
381                                              sk_sp<SkImage> image) {
382   // If the image is not texture backed, it has no backing, just return it.
// NOTE(review): the `return` statements for this and the kUnknown color-type
// early-out below are elided from this chunk.
383   if (!image->isTextureBacked()) {
387   GrSurfaceOrigin origin;
388   image->getBackendTexture(false /* flushPendingGrContextIO */, &origin);
389   SkColorType color_type = image->colorType();
390   if (color_type == kUnknown_SkColorType) {
393   sk_sp<SkColorSpace> color_space = image->refColorSpace();
// Detach the backend texture from the (consumed) SkImage, then re-wrap it in
// a new SkImage whose texture we now effectively control.
394   GrBackendTexture backend_texture;
395   SkImage::BackendTextureReleaseProc release_proc;
396   SkImage::MakeBackendTextureFromSkImage(context, std::move(image),
397                                          &backend_texture, &release_proc);
398   return SkImage::MakeFromTexture(context, backend_texture, origin, color_type,
399                                   kPremul_SkAlphaType, std::move(color_space));
402 // Immediately deletes an SkImage, preventing caching of that image. Must be
403 // called while holding the context lock.
404 void DeleteSkImageAndPreventCaching(viz::RasterContextProvider* context,
405                                     sk_sp<SkImage>&& image) {
406   // No need to do anything for a non-texture-backed images.
407   if (!image->isTextureBacked())
410   sk_sp<SkImage> image_owned =
411       TakeOwnershipOfSkImageBacking(context->GrContext(), std::move(image));
412   // If context is lost, we may get a null image here.
// NOTE(review): the null-check guard for |image_owned| is elided from this
// chunk; the lines below assume a non-null image.
414     // Delete |original_image_owned| as Skia will not clean it up. We are
415     // holding the context lock here, so we can delete immediately.
416     uint32_t texture_id =
417         GpuImageDecodeCache::GlIdFromSkImage(image_owned.get());
418     context->RasterInterface()->DeleteGpuRasterTexture(texture_id);
422 // TODO(ericrk): Replace calls to this with calls to SkImage::makeTextureImage,
423 // once that function handles colorspaces. https://crbug.com/834837
// Uploads |source_image| to the GPU, optionally converting color space and
// generating mips. Intermediate texture-backed images produced along the way
// are deleted immediately via DeleteSkImageAndPreventCaching().
424 sk_sp<SkImage> MakeTextureImage(viz::RasterContextProvider* context,
425                                 sk_sp<SkImage> source_image,
426                                 sk_sp<SkColorSpace> target_color_space,
427                                 GrMipMapped mip_mapped) {
428   // Step 1: Upload image and generate mips if necessary. If we will be applying
429   // a color-space conversion, don't generate mips yet, instead do it after
430   // conversion, in step 3.
431   bool add_mips_after_color_conversion =
432       (target_color_space && mip_mapped == GrMipMapped::kYes);
433   sk_sp<SkImage> uploaded_image = source_image->makeTextureImage(
434       context->GrContext(),
435       add_mips_after_color_conversion ? GrMipMapped::kNo : mip_mapped);
437   // Step 2: Apply a color-space conversion if necessary.
438   if (uploaded_image && target_color_space) {
439     sk_sp<SkImage> pre_converted_image = uploaded_image;
440     uploaded_image = uploaded_image->makeColorSpace(target_color_space,
441                                                     context->GrContext());
// If conversion produced a new image, free the pre-conversion texture now.
443     if (uploaded_image != pre_converted_image)
444       DeleteSkImageAndPreventCaching(context, std::move(pre_converted_image));
447   // Step 3: If we had a colorspace conversion, we couldn't mipmap in step 1, so
// do it now (second argument to makeTextureImage elided from this chunk).
449   if (uploaded_image && add_mips_after_color_conversion) {
450     sk_sp<SkImage> pre_mipped_image = uploaded_image;
451     uploaded_image = uploaded_image->makeTextureImage(context->GrContext(),
453     DCHECK_NE(pre_mipped_image, uploaded_image);
454     DeleteSkImageAndPreventCaching(context, std::move(pre_mipped_image));
// May be null if the context was lost during any step.
457   return uploaded_image;
460 // We use this below, instead of just a std::unique_ptr, so that we can run
461 // a Finch experiment to check the impact of not using discardable memory on the
// DiscardableMemory implementation backed by a plain heap allocation. Unlike
// real discardable memory, once Unlock() discards the buffer it can never be
// re-locked (Lock() then returns false).
463 class HeapDiscardableMemory : public base::DiscardableMemory {
465   explicit HeapDiscardableMemory(size_t size)
466       : memory_(new char[size]), size_(size) {}
467   ~HeapDiscardableMemory() override = default;
468   [[nodiscard]] bool Lock() override {
469     // Locking only succeeds when we have not yet discarded the memory (i.e. if
470     // we have never called |Unlock()|.)
471     return memory_ != nullptr;
// Unlock discards immediately: heap memory has no kernel-purgeable state.
// (Definition of Discard() is elided from this chunk.)
473   void Unlock() override { Discard(); }
474   void* data() const override {
476     return static_cast<void*>(memory_.get());
478   void DiscardForTesting() override { Discard(); }
479   base::trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
481       base::trace_event::ProcessMemoryDump* pmd) const override {
482     auto* dump = pmd->CreateAllocatorDump(name);
483     dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
484                     base::trace_event::MemoryAllocatorDump::kUnitsBytes, size_);
// Null after discard; size_ retained for memory-dump reporting.
494   std::unique_ptr<char[]> memory_;
500 // Extract the information to uniquely identify a DrawImage for the purposes of
501 // the |in_use_cache_|.
// NOTE(review): the second constructor parameter line (providing |mip_level|)
// is elided from this chunk.
502 GpuImageDecodeCache::InUseCacheKey::InUseCacheKey(const DrawImage& draw_image,
504     : frame_key(draw_image.frame_key()),
505       upload_scale_mip_level(mip_level),
506       filter_quality(CalculateDesiredFilterQuality(draw_image)),
507       target_color_params(draw_image.target_color_params()) {}
// Keys are equal iff all four identifying fields match; must stay consistent
// with InUseCacheKeyHash below.
509 bool GpuImageDecodeCache::InUseCacheKey::operator==(
510     const InUseCacheKey& other) const {
511   return frame_key == other.frame_key &&
512          upload_scale_mip_level == other.upload_scale_mip_level &&
513          filter_quality == other.filter_quality &&
514          target_color_params == other.target_color_params;
// Hash combining all fields compared by operator== (nested HashInts; a
// combining line appears elided between lines 520 and 522 of the original).
517 size_t GpuImageDecodeCache::InUseCacheKeyHash::operator()(
518     const InUseCacheKey& cache_key) const {
519   return base::HashInts(
520       cache_key.target_color_params.GetHash(),
522           cache_key.frame_key.hash(),
523           base::HashInts(cache_key.upload_scale_mip_level,
524                          static_cast<int>(cache_key.filter_quality))));
// InUseCacheEntry special members. The copy/move constructors are defaulted
// (their `default;` continuation lines are elided from this chunk).
527 GpuImageDecodeCache::InUseCacheEntry::InUseCacheEntry(
528     scoped_refptr<ImageData> image_data)
529     : image_data(std::move(image_data)) {}
530 GpuImageDecodeCache::InUseCacheEntry::InUseCacheEntry(const InUseCacheEntry&) =
532 GpuImageDecodeCache::InUseCacheEntry::InUseCacheEntry(InUseCacheEntry&&) =
534 GpuImageDecodeCache::InUseCacheEntry::~InUseCacheEntry() = default;
536 // Task which decodes an image and stores the result in discardable memory.
537 // This task does not use GPU resources and can be run on any thread.
538 class GpuImageDecodeTaskImpl : public TileTask {
540   GpuImageDecodeTaskImpl(GpuImageDecodeCache* cache,
541                          const DrawImage& draw_image,
542                          const ImageDecodeCache::TracingInfo& tracing_info,
543                          GpuImageDecodeCache::DecodeTaskType task_type)
// Background-thread priority is disabled under kNormalPriorityImageDecoding
// so decodes compete at normal priority.
544       : TileTask(TileTask::SupportsConcurrentExecution::kYes,
545                  (base::FeatureList::IsEnabled(
546                       features::kNormalPriorityImageDecoding)
547                       ? TileTask::SupportsBackgroundThreadPriority::kNo
548                       : TileTask::SupportsBackgroundThreadPriority::kYes)),
551         tracing_info_(tracing_info),
552         task_type_(task_type) {
// Callers must pre-filter skippable images (see SkipImage()).
553     DCHECK(!SkipImage(draw_image));
555   GpuImageDecodeTaskImpl(const GpuImageDecodeTaskImpl&) = delete;
557   GpuImageDecodeTaskImpl& operator=(const GpuImageDecodeTaskImpl&) = delete;
559   // Overridden from Task:
560   void RunOnWorkerThread() override {
561     TRACE_EVENT2("cc", "GpuImageDecodeTaskImpl::RunOnWorkerThread", "mode",
562                  "gpu", "source_prepare_tiles_id",
563                  tracing_info_.prepare_tiles_id);
565     const auto* image_metadata = image_.paint_image().GetImageHeaderMetadata();
566     const ImageType image_type =
567         image_metadata ? image_metadata->image_type : ImageType::kInvalid;
568     devtools_instrumentation::ScopedImageDecodeTask image_decode_task(
569         &image_.paint_image(),
570         devtools_instrumentation::ScopedImageDecodeTask::kGpu,
571         ImageDecodeCache::ToScopedTaskType(tracing_info_.task_type),
572         ImageDecodeCache::ToScopedImageType(image_type));
573     cache_->DecodeImageInTask(image_, tracing_info_.task_type);
576   // Overridden from TileTask:
577   void OnTaskCompleted() override {
578     cache_->OnImageDecodeTaskCompleted(image_, task_type_);
581   // Overridden from TileTask:
582   bool TaskContainsLCPCandidateImages() const override {
// Report LCP-candidate work while the decode is still pending (the `return
// true;` line is elided from this chunk).
583     if (!HasCompleted() && image_.paint_image().may_be_lcp_candidate())
585     return TileTask::TaskContainsLCPCandidateImages();
589   ~GpuImageDecodeTaskImpl() override = default;
// Owning cache outlives this task.
592   raw_ptr<GpuImageDecodeCache> cache_;
594   const ImageDecodeCache::TracingInfo tracing_info_;
595   const GpuImageDecodeCache::DecodeTaskType task_type_;
598 // Task which creates an image from decoded data. Typically this involves
599 // uploading data to the GPU, which requires this task be run on the non-
600 // concurrent thread.
601 class ImageUploadTaskImpl : public TileTask {
603   ImageUploadTaskImpl(GpuImageDecodeCache* cache,
604                       const DrawImage& draw_image,
605                       scoped_refptr<TileTask> decode_dependency,
606                       const ImageDecodeCache::TracingInfo& tracing_info)
607       : TileTask(TileTask::SupportsConcurrentExecution::kNo,
608                  TileTask::SupportsBackgroundThreadPriority::kYes),
611         tracing_info_(tracing_info) {
612     DCHECK(!SkipImage(draw_image));
613     // If an image is already decoded and locked, we will not generate a
// decode task (so |decode_dependency| may legitimately be null here).
615     if (decode_dependency)
616       dependencies_.push_back(std::move(decode_dependency));
618   ImageUploadTaskImpl(const ImageUploadTaskImpl&) = delete;
620   ImageUploadTaskImpl& operator=(const ImageUploadTaskImpl&) = delete;
622   // Override from Task:
623   void RunOnWorkerThread() override {
624     TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu",
625                  "source_prepare_tiles_id", tracing_info_.prepare_tiles_id);
626     const auto* image_metadata = image_.paint_image().GetImageHeaderMetadata();
627     const ImageType image_type =
628         image_metadata ? image_metadata->image_type : ImageType::kInvalid;
629     devtools_instrumentation::ScopedImageUploadTask image_upload_task(
630         &image_.paint_image(), ImageDecodeCache::ToScopedImageType(image_type));
631     cache_->UploadImageInTask(image_);
634   // Overridden from TileTask:
635   void OnTaskCompleted() override {
636     cache_->OnImageUploadTaskCompleted(image_);
640   ~ImageUploadTaskImpl() override = default;
// Owning cache outlives this task.
643   raw_ptr<GpuImageDecodeCache> cache_;
645   const ImageDecodeCache::TracingInfo tracing_info_;
// ImageDataBase: shared lock/usage bookkeeping for decoded and uploaded image
// data. Tracks lock_count / used / first_lock_wasted for UMA reporting.
648 GpuImageDecodeCache::ImageDataBase::ImageDataBase() = default;
649 GpuImageDecodeCache::ImageDataBase::~ImageDataBase() = default;
// Called when data is first populated in a locked state; lock_count must
// already be exactly 1.
651 void GpuImageDecodeCache::ImageDataBase::OnSetLockedData(bool out_of_raster) {
652   DCHECK_EQ(usage_stats_.lock_count, 1);
654   usage_stats_.first_lock_out_of_raster = out_of_raster;
658 void GpuImageDecodeCache::ImageDataBase::OnResetData() {
660   usage_stats_ = UsageStats();
663 void GpuImageDecodeCache::ImageDataBase::OnLock() {
666   ++usage_stats_.lock_count;
669 void GpuImageDecodeCache::ImageDataBase::OnUnlock() {
// Record whether the very first lock produced any use at all.
672   if (usage_stats_.lock_count == 1)
673     usage_stats_.first_lock_wasted = !usage_stats_.used;
// Maps UsageStats to the ImageUsageState histogram bucket (see the enum's
// lock_count/used table near the top of the file).
676 int GpuImageDecodeCache::ImageDataBase::UsageState() const {
677   ImageUsageState state = IMAGE_USAGE_STATE_WASTED_ONCE;
678   if (usage_stats_.lock_count == 1) {
679     if (usage_stats_.used)
680       state = IMAGE_USAGE_STATE_USED_ONCE;
682       state = IMAGE_USAGE_STATE_WASTED_ONCE;
684     if (usage_stats_.used)
685       state = IMAGE_USAGE_STATE_USED_RELOCKED;
687       state = IMAGE_USAGE_STATE_WASTED_RELOCKED;
// DecodedImageData: CPU-side decoded result, held in discardable memory as
// either a single RGBX image or three YUV plane images.
693 GpuImageDecodeCache::DecodedImageData::DecodedImageData(
694     bool is_bitmap_backed,
695     bool can_do_hardware_accelerated_decode,
696     bool do_hardware_accelerated_decode)
697     : is_bitmap_backed_(is_bitmap_backed),
698       can_do_hardware_accelerated_decode_(can_do_hardware_accelerated_decode),
699       do_hardware_accelerated_decode_(do_hardware_accelerated_decode) {}
700 GpuImageDecodeCache::DecodedImageData::~DecodedImageData() {
// Lock/Unlock bodies elided from this chunk.
704 bool GpuImageDecodeCache::DecodedImageData::Lock() {
710 void GpuImageDecodeCache::DecodedImageData::Unlock() {
// RGBX variant: takes ownership of the discardable backing plus the single
// decoded SkImage.
715 void GpuImageDecodeCache::DecodedImageData::SetLockedData(
716     std::unique_ptr<base::DiscardableMemory> data,
717     sk_sp<SkImage> image,
718     bool out_of_raster) {
723   data_ = std::move(data);
724   image_ = std::move(image);
725   OnSetLockedData(out_of_raster);
// YUV variant: stores the three plane images in a fixed Y/U/V array.
728 void GpuImageDecodeCache::DecodedImageData::SetLockedData(
729     std::unique_ptr<base::DiscardableMemory> data,
730     sk_sp<SkImage> image_y,
731     sk_sp<SkImage> image_u,
732     sk_sp<SkImage> image_v,
733     bool out_of_raster) {
739   DCHECK(!image_yuv_planes_);
740   data_ = std::move(data);
741   image_yuv_planes_ = std::array<sk_sp<SkImage>, kNumYUVPlanes>();
742   image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kY)) = std::move(image_y);
743   image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kU)) = std::move(image_u);
744   image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kV)) = std::move(image_v);
745   OnSetLockedData(out_of_raster);
// Bitmap-backed images bypass discardable memory entirely.
748 void GpuImageDecodeCache::DecodedImageData::SetBitmapImage(
749     sk_sp<SkImage> image) {
750   DCHECK(is_bitmap_backed_);
751   image_ = std::move(image);
755 void GpuImageDecodeCache::DecodedImageData::ResetBitmapImage() {
756   DCHECK(is_bitmap_backed_);
758   image_yuv_planes_.reset();
// Releases decoded data; the surrounding conditional structure (YUV vs RGBX
// paths) is partially elided from this chunk.
762 void GpuImageDecodeCache::DecodedImageData::ResetData() {
765     DCHECK(image_yuv_planes_);
766     DCHECK(image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kY)));
767     DCHECK(image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kU)));
768     DCHECK(image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kV)));
775   image_yuv_planes_.reset();
780 void GpuImageDecodeCache::DecodedImageData::ReportUsageStats() const {
781   if (do_hardware_accelerated_decode_) {
782     // When doing hardware decode acceleration, we don't want to record usage
783     // stats for the decode data. The reason is that the decode is done in the
784     // GPU process and the decoded result stays there. On the renderer side, we
785     // don't use or lock the decoded data, so reporting this status would
786     // incorrectly distort the software decoding statistics.
789   UMA_HISTOGRAM_ENUMERATION("Renderer4.GpuImageDecodeState",
790                             static_cast<ImageUsageState>(UsageState()),
791                             IMAGE_USAGE_STATE_COUNT);
792   UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageDecodeState.FirstLockWasted",
793                         usage_stats_.first_lock_wasted);
794   if (usage_stats_.first_lock_out_of_raster)
795     UMA_HISTOGRAM_BOOLEAN(
796         "Renderer4.GpuImageDecodeState.FirstLockWasted.OutOfRaster",
797         usage_stats_.first_lock_wasted);
// UploadedImageData: GPU-side result, stored either as SkImage(s)
// (Mode::kSkImage, RGB or three YUV planes) or as a transfer-cache entry id
// (Mode::kTransferCache, OOP-R).
800 GpuImageDecodeCache::UploadedImageData::UploadedImageData() = default;
801 GpuImageDecodeCache::UploadedImageData::~UploadedImageData() {
// All GPU resources must have been released via Reset() before destruction.
803   DCHECK(!image_yuv_planes_);
804   DCHECK(!gl_plane_ids_);
807 void GpuImageDecodeCache::UploadedImageData::SetImage(
808     sk_sp<SkImage> image,
809     bool represents_yuv_image) {
810   DCHECK(mode_ == Mode::kNone);
812   DCHECK(!transfer_cache_id_);
815   mode_ = Mode::kSkImage;
816   image_ = std::move(image);
817   // Calling isTexturedBacked() on the YUV SkImage would flatten it to RGB.
818   if (!represents_yuv_image && image_->isTextureBacked()) {
819     gl_id_ = GlIdFromSkImage(image_.get());
823   OnSetLockedData(false /* out_of_raster */);
// Stores the three uploaded YUV plane images and, when all are
// texture-backed, caches their GL texture ids for later deletion/tracing.
826 void GpuImageDecodeCache::UploadedImageData::SetYuvImage(
827     sk_sp<SkImage> y_image_input,
828     sk_sp<SkImage> u_image_input,
829     sk_sp<SkImage> v_image_input) {
830   DCHECK(!image_yuv_planes_);
831   DCHECK(!gl_plane_ids_);
832   DCHECK(!transfer_cache_id_);
833   DCHECK(y_image_input);
834   DCHECK(u_image_input);
835   DCHECK(v_image_input);
837   mode_ = Mode::kSkImage;
838   image_yuv_planes_ = std::array<sk_sp<SkImage>, kNumYUVPlanes>();
839   image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kY)) =
840       std::move(y_image_input);
841   image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kU)) =
842       std::move(u_image_input);
843   image_yuv_planes_->at(static_cast<size_t>(YUVIndex::kV)) =
844       std::move(v_image_input);
845   if (y_image()->isTextureBacked() && u_image()->isTextureBacked() &&
846       v_image()->isTextureBacked()) {
847     gl_plane_ids_ = std::array<GrGLuint, kNumYUVPlanes>();
848     gl_plane_ids_->at(static_cast<size_t>(YUVIndex::kY)) =
849         GlIdFromSkImage(y_image().get());
850     gl_plane_ids_->at(static_cast<size_t>(YUVIndex::kU)) =
851         GlIdFromSkImage(u_image().get());
852     gl_plane_ids_->at(static_cast<size_t>(YUVIndex::kV)) =
853         GlIdFromSkImage(v_image().get());
857 void GpuImageDecodeCache::UploadedImageData::SetTransferCacheId(uint32_t id) {
858   DCHECK(mode_ == Mode::kNone);
860   DCHECK(!transfer_cache_id_);
862   mode_ = Mode::kTransferCache;
863   transfer_cache_id_ = id;
864   OnSetLockedData(false /* out_of_raster */);
// Clears all stored upload state (some intermediate lines elided from this
// chunk).
867 void GpuImageDecodeCache::UploadedImageData::Reset() {
868   if (mode_ != Mode::kNone)
872   image_yuv_planes_.reset();
873   gl_plane_ids_.reset();
876   transfer_cache_id_.reset();
880 void GpuImageDecodeCache::UploadedImageData::ReportUsageStats() const {
881   UMA_HISTOGRAM_ENUMERATION("Renderer4.GpuImageUploadState",
882                             static_cast<ImageUsageState>(UsageState()),
883                             IMAGE_USAGE_STATE_COUNT);
884   UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstLockWasted",
885                         usage_stats_.first_lock_wasted);
// Per-image cache entry. Captures everything needed to decode and upload a
// particular PaintImage at a particular scale/quality/color configuration.
// |yuva_info|, when present, indicates the image will be decoded and
// uploaded as separate Y/U/V planes rather than a single RGBA image.
888 GpuImageDecodeCache::ImageData::ImageData(
889 PaintImage::Id paint_image_id,
890 DecodedDataMode mode,
892 const TargetColorParams& target_color_params,
893 PaintFlags::FilterQuality quality,
894 int upload_scale_mip_level,
896 bool is_bitmap_backed,
897 bool can_do_hardware_accelerated_decode,
898 bool do_hardware_accelerated_decode,
899 absl::optional<SkYUVAPixmapInfo> yuva_info)
900 : paint_image_id(paint_image_id),
903 target_color_params(target_color_params),
905 upload_scale_mip_level(upload_scale_mip_level),
906 needs_mips(needs_mips),
907 is_bitmap_backed(is_bitmap_backed),
908 yuva_pixmap_info(yuva_info),
909 decode(is_bitmap_backed,
910 can_do_hardware_accelerated_decode,
911 do_hardware_accelerated_decode) {
912 if (yuva_pixmap_info.has_value()) {
913 // This is the only plane config supported currently.
914 DCHECK_EQ(yuva_pixmap_info->yuvaInfo().planeConfig(),
915 SkYUVAInfo::PlaneConfig::kY_U_V);
// Destruction is only legal once all decode/upload refs are dropped, the
// decoded data is unlocked, and the GPU-side data has been freed.
919 GpuImageDecodeCache::ImageData::~ImageData() {
920 // We should never delete ImageData while it is in use or before it has been
922 DCHECK_EQ(0u, upload.ref_count);
923 DCHECK_EQ(0u, decode.ref_count);
924 DCHECK_EQ(false, decode.is_locked());
925 // This should always be cleaned up before deleting the image, as it needs to
926 // be freed with the GL context lock held.
927 DCHECK(!HasUploadedData());
// True for entries whose data lives GPU-side (either as GPU textures or as
// transfer-cache entries), as opposed to CPU-backed entries.
930 bool GpuImageDecodeCache::ImageData::IsGpuOrTransferCache() const {
931 return mode == DecodedDataMode::kGpu ||
932 mode == DecodedDataMode::kTransferCache;
// Returns whether this entry currently holds uploaded (GPU-resident) data.
// The check depends on the decode mode: a GPU image, a transfer-cache id,
// or (for kCpu) whatever the elided fall-through below decides.
935 bool GpuImageDecodeCache::ImageData::HasUploadedData() const {
937 case DecodedDataMode::kGpu:
938 // upload.image() stores the result of MakeFromYUVATextures
939 if (upload.image()) {
940 // TODO(915968): Be smarter about being able to re-upload planes
941 // selectively if only some get deleted from under us.
// For YUV images, a non-null image implies all planes are still present.
942 DCHECK(!yuva_pixmap_info.has_value() || upload.has_yuv_planes());
946 case DecodedDataMode::kTransferCache:
947 return !!upload.transfer_cache_id();
948 case DecodedDataMode::kCpu:
// Debug check of the budgeting invariant: a budgeted image must hold at
// least one upload ref.
954 void GpuImageDecodeCache::ImageData::ValidateBudgeted() const {
955 // If the image is budgeted, it must be refed.
957 DCHECK_GT(upload.ref_count, 0u);
// Extracts the GL texture id backing a texture-backed SkImage. Flushes
// pending GrContext IO so the returned id refers to a fully-written
// texture. Returns via elided paths when the backend texture or its GL
// info is unavailable.
961 GrGLuint GpuImageDecodeCache::GlIdFromSkImage(const SkImage* image) {
962 DCHECK(image->isTextureBacked());
963 GrBackendTexture backend_texture =
964 image->getBackendTexture(true /* flushPendingGrContextIO */);
965 if (!backend_texture.isValid())
968 GrGLTextureInfo info;
969 if (!backend_texture.getGLTextureInfo(&info))
// Constructs the cache. Queries context capabilities (under the context
// lock when one exists) to determine supported YUV data types and whether
// hardware-accelerated JPEG/WebP decodes are allowed, then registers for
// memory dumps and memory-pressure notifications.
975 GpuImageDecodeCache::GpuImageDecodeCache(
976 viz::RasterContextProvider* context,
977 bool use_transfer_cache,
978 SkColorType color_type,
979 size_t max_working_set_bytes,
980 int max_texture_size,
981 RasterDarkModeFilter* const dark_mode_filter)
982 : color_type_(color_type),
983 use_transfer_cache_(use_transfer_cache),
985 max_texture_size_(max_texture_size),
986 generator_client_id_(PaintImage::GetNextGeneratorClientId()),
987 enable_clipped_image_scaling_(
988 base::CommandLine::ForCurrentProcess()->HasSwitch(
989 switches::kEnableClippedImageScaling)),
990 persistent_cache_(PersistentCache::NO_AUTO_EVICT),
991 max_working_set_bytes_(max_working_set_bytes),
992 max_working_set_items_(kMaxItemsInWorkingSet),
993 dark_mode_filter_(dark_mode_filter) {
994 DCHECK_NE(generator_client_id_, PaintImage::kDefaultGeneratorClientId);
995 // Note that to compute |allow_accelerated_jpeg_decodes_| and
996 // |allow_accelerated_webp_decodes_|, the last thing we check is the feature
997 // flag. That's because we want to ensure that we're in OOP-R mode and the
998 // hardware decoder supports the image type so that finch experiments
999 // involving hardware decode acceleration only count users in that
1000 // population (both in the 'control' and the 'enabled' groups).
1001 allow_accelerated_jpeg_decodes_ =
1002 use_transfer_cache &&
1003 context_->ContextSupport()->IsJpegDecodeAccelerationSupported() &&
1004 base::FeatureList::IsEnabled(features::kVaapiJpegImageDecodeAcceleration);
1005 allow_accelerated_webp_decodes_ =
1006 use_transfer_cache &&
1007 context_->ContextSupport()->IsWebPDecodeAccelerationSupported() &&
1008 base::FeatureList::IsEnabled(features::kVaapiWebPImageDecodeAcceleration);
1011 // TODO(crbug.com/1110007): We shouldn't need to lock to get capabilities.
1012 absl::optional<viz::RasterContextProvider::ScopedRasterContextLock>
// The context lock is optional: some embedders (e.g. single-thread GPU
// paths) supply a context without a lock.
1014 if (context_->GetLock())
1015 context_lock.emplace(context_);
1016 const auto& caps = context_->ContextCapabilities();
// 8-bit YUV planes are always supported; 16-bit and half-float depend on
// the reported texture capabilities.
1017 yuva_supported_data_types_.enableDataType(
1018 SkYUVAPixmapInfo::DataType::kUnorm8, 1);
1019 if (caps.texture_norm16) {
1020 yuva_supported_data_types_.enableDataType(
1021 SkYUVAPixmapInfo::DataType::kUnorm16, 1);
1023 if (caps.texture_half_float_linear) {
1024 yuva_supported_data_types_.enableDataType(
1025 SkYUVAPixmapInfo::DataType::kFloat16, 1);
1029 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
1030 // Don't register a dump provider in these cases.
1031 if (base::ThreadTaskRunnerHandle::IsSet()) {
1032 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
1033 this, "cc::GpuImageDecodeCache", base::ThreadTaskRunnerHandle::Get());
// base::Unretained is safe here only if |this| outlives the listener;
// the listener is owned by |this|, so it is destroyed first.
1035 memory_pressure_listener_ = std::make_unique<base::MemoryPressureListener>(
1036 FROM_HERE, base::BindRepeating(&GpuImageDecodeCache::OnMemoryPressure,
1037 base::Unretained(this)));
1039 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1040 "GpuImageDecodeCache::DarkModeFilter", "dark_mode_filter",
1041 static_cast<void*>(dark_mode_filter_));
// Tears down the cache: frees all outstanding image memory (by zeroing the
// limits via SetShouldAggressivelyFreeResources) and unregisters the
// memory-dump provider.
1044 GpuImageDecodeCache::~GpuImageDecodeCache() {
1045 // Debugging crbug.com/650234.
// No in-use entries may remain at destruction time.
1046 CHECK_EQ(0u, in_use_cache_.size());
1048 // SetShouldAggressivelyFreeResources will zero our limits and free all
1049 // outstanding image memory.
1050 SetShouldAggressivelyFreeResources(true);
1052 // It is safe to unregister, even if we didn't register in the constructor.
1053 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
// In-raster entry point: forwards to the internal implementation with an
// upload-task decode type (the decode runs as part of an upload task).
1057 ImageDecodeCache::TaskResult GpuImageDecodeCache::GetTaskForImageAndRef(
1058 const DrawImage& draw_image,
1059 const TracingInfo& tracing_info) {
1060 DCHECK_EQ(tracing_info.task_type, TaskType::kInRaster);
1061 return GetTaskForImageAndRefInternal(draw_image, tracing_info,
1062 DecodeTaskType::kPartOfUploadTask);
// Out-of-raster entry point (e.g. img.decode()): requests a stand-alone
// decode task with NOW priority rather than one tied to an upload task.
1065 ImageDecodeCache::TaskResult
1066 GpuImageDecodeCache::GetOutOfRasterDecodeTaskForImageAndRef(
1067 const DrawImage& draw_image) {
1068 return GetTaskForImageAndRefInternal(
1069 draw_image, TracingInfo(0, TilePriority::NOW, TaskType::kOutOfRaster),
1070 DecodeTaskType::kStandAloneDecodeTask);
// Core task-creation path. Finds or creates the ImageData for |draw_image|,
// takes a caller-owned ref on it, and returns either an existing task, a
// newly-created decode/upload task, or an at-raster fallback when the image
// cannot be budgeted. The caller releases its ref via UnrefImage.
1073 ImageDecodeCache::TaskResult GpuImageDecodeCache::GetTaskForImageAndRefInternal(
1074 const DrawImage& draw_image,
1075 const TracingInfo& tracing_info,
1076 DecodeTaskType task_type) {
1077 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1078 "GpuImageDecodeCache::GetTaskForImageAndRef");
// Images that will not be drawn need no task and no ref.
1080 if (SkipImage(draw_image)) {
1081 return TaskResult(false /* need_unref */, false /* is_at_raster_decode */,
1082 false /* can_do_hardware_accelerated_decode */);
1085 base::AutoLock lock(lock_);
1086 const InUseCacheKey cache_key = InUseCacheKeyFromDrawImage(draw_image);
1087 ImageData* image_data = GetImageDataForDrawImage(draw_image, cache_key);
1088 scoped_refptr<ImageData> new_data;
1090 // We need an ImageData, create one now. Note that hardware decode
1091 // acceleration is allowed only in the DecodeTaskType::kPartOfUploadTask
1092 // case. This prevents the img.decode() and checkerboard images paths from
1093 // going through hardware decode acceleration.
1094 new_data = CreateImageData(
1097 DecodeTaskType::kPartOfUploadTask /* allow_hardware_decode */);
1098 image_data = new_data.get();
1099 } else if (image_data->decode.decode_failure) {
1100 // We have already tried and failed to decode this image, so just return.
1101 return TaskResult(false /* need_unref */, false /* is_at_raster_decode */,
1102 image_data->decode.can_do_hardware_accelerated_decode());
1103 } else if (task_type == DecodeTaskType::kPartOfUploadTask &&
1104 image_data->upload.task) {
1105 // We had an existing upload task, ref the image and return the task.
1106 image_data->ValidateBudgeted();
1107 RefImage(draw_image, cache_key);
1108 return TaskResult(image_data->upload.task,
1109 image_data->decode.can_do_hardware_accelerated_decode());
1110 } else if (task_type == DecodeTaskType::kStandAloneDecodeTask &&
1111 image_data->decode.stand_alone_task) {
1112 // We had an existing out of raster task, ref the image and return the task.
1113 image_data->ValidateBudgeted();
1114 RefImage(draw_image, cache_key);
1115 DCHECK(!image_data->decode.can_do_hardware_accelerated_decode());
1116 return TaskResult(image_data->decode.stand_alone_task,
1117 false /* can_do_hardware_accelerated_decode */);
1120 // Ensure that the image we're about to decode/upload will fit in memory, if
1121 // not already budgeted.
1122 if (!image_data->is_budgeted && !EnsureCapacity(image_data->size)) {
1123 // Image will not fit, do an at-raster decode.
1124 return TaskResult(false /* need_unref */, true /* is_at_raster_decode */,
1125 image_data->decode.can_do_hardware_accelerated_decode());
1128 // If we had to create new image data, add it to our map now that we know it
1131 AddToPersistentCache(draw_image, std::move(new_data));
1133 // Ref the image before creating a task - this ref is owned by the caller, and
1134 // it is their responsibility to release it by calling UnrefImage.
1135 RefImage(draw_image, cache_key);
1137 // If we already have an image and it is locked (or lock-able), just return
1138 // that. The image must be budgeted before we attempt to lock it.
1139 DCHECK(image_data->is_budgeted);
1140 if (image_data->HasUploadedData() &&
1141 TryLockImage(HaveContextLock::kNo, draw_image, image_data)) {
1142 return TaskResult(true /* need_unref */, false /* is_at_raster_decode */,
1143 image_data->decode.can_do_hardware_accelerated_decode());
1146 scoped_refptr<TileTask> task;
1147 if (task_type == DecodeTaskType::kPartOfUploadTask) {
1148 // Ref image and create a upload and decode tasks. We will release this ref
1149 // in UploadTaskCompleted.
1150 RefImage(draw_image, cache_key);
1151 task = base::MakeRefCounted<ImageUploadTaskImpl>(
// The decode task becomes a dependency of the upload task.
1153 GetImageDecodeTaskAndRef(draw_image, tracing_info, task_type),
1155 image_data->upload.task = task;
1157 task = GetImageDecodeTaskAndRef(draw_image, tracing_info, task_type);
1161 return TaskResult(task,
1162 image_data->decode.can_do_hardware_accelerated_decode());
1165 return TaskResult(true /* needs_unref */, false /* is_at_raster_decode */,
1166 image_data->decode.can_do_hardware_accelerated_decode());
// Releases a caller-owned image ref taken by GetTaskForImageAndRef*.
1169 void GpuImageDecodeCache::UnrefImage(const DrawImage& draw_image) {
1170 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1171 "GpuImageDecodeCache::UnrefImage");
1172 base::AutoLock lock(lock_);
1173 UnrefImageInternal(draw_image, InUseCacheKeyFromDrawImage(draw_image));
// Decides whether this cache should handle |draw_image| at all. Images that
// are already texture-backed are handled specially (remaining logic is
// outside this excerpt).
1176 bool GpuImageDecodeCache::UseCacheForDrawImage(
1177 const DrawImage& draw_image) const {
1178 if (draw_image.paint_image().IsTextureBacked())
// Raster-time lookup: returns a DecodedDrawImage ready for drawing,
// decoding and uploading on-demand if necessary. The caller must hold the
// context lock and must later call DrawWithImageFinished to release the
// image ref taken here.
1184 DecodedDrawImage GpuImageDecodeCache::GetDecodedImageForDraw(
1185 const DrawImage& draw_image) {
1186 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1187 "GpuImageDecodeCache::GetDecodedImageForDraw");
1189 // We are being called during raster. The context lock must already be
1190 // acquired by the caller.
1191 CheckContextLockAcquiredIfNecessary();
1193 // If we're skipping the image, then the filter quality doesn't matter.
1194 if (SkipImage(draw_image))
1195 return DecodedDrawImage();
1197 base::AutoLock lock(lock_);
1198 const InUseCacheKey cache_key = InUseCacheKeyFromDrawImage(draw_image);
1199 ImageData* image_data = GetImageDataForDrawImage(draw_image, cache_key);
1201 // We didn't find the image, create a new entry.
1202 auto data = CreateImageData(draw_image, true /* allow_hardware_decode */);
1203 image_data = data.get();
1204 AddToPersistentCache(draw_image, std::move(data));
1207 // Ref the image and decode so that they stay alive while we are
1208 // decoding/uploading.
1209 // Note that refing the image will attempt to budget the image, if not already
1211 RefImage(draw_image, cache_key);
1212 RefImageDecode(draw_image, cache_key);
1214 // We may or may not need to decode and upload the image we've found, the
1215 // following functions early-out to if we already decoded.
1216 DecodeImageAndGenerateDarkModeFilterIfNecessary(draw_image, image_data,
1217 TaskType::kInRaster);
1218 UploadImageIfNecessary(draw_image, image_data);
1219 // Unref the image decode, but not the image. The image ref will be released
1220 // in DrawWithImageFinished.
1221 UnrefImageDecode(draw_image, cache_key);
// Look up any cached dark-mode color filter for this image's src rect.
1223 sk_sp<SkColorFilter> dark_mode_color_filter = nullptr;
1224 if (draw_image.use_dark_mode()) {
1225 auto it = image_data->decode.dark_mode_color_filter_cache.find(
1226 draw_image.src_rect());
1227 if (it != image_data->decode.dark_mode_color_filter_cache.end())
1228 dark_mode_color_filter = it->second;
// Transfer-cache mode returns the entry id; GPU mode returns the SkImage.
1231 if (image_data->mode == DecodedDataMode::kTransferCache) {
1232 DCHECK(use_transfer_cache_);
1233 auto id = image_data->upload.transfer_cache_id();
1235 image_data->upload.mark_used();
// A missing id is only acceptable if the decode failed.
1236 DCHECK(id || image_data->decode.decode_failure);
1238 SkSize scale_factor = CalculateScaleFactorForMipLevel(
1239 draw_image, image_data->upload_scale_mip_level);
1240 DecodedDrawImage decoded_draw_image(
1241 id, std::move(dark_mode_color_filter), SkSize(), scale_factor,
1242 CalculateDesiredFilterQuality(draw_image), image_data->needs_mips,
1243 image_data->is_budgeted);
1244 return decoded_draw_image;
1246 DCHECK(!use_transfer_cache_);
1247 sk_sp<SkImage> image = image_data->upload.image();
1249 image_data->upload.mark_used();
1250 DCHECK(image || image_data->decode.decode_failure);
1252 SkSize scale_factor = CalculateScaleFactorForMipLevel(
1253 draw_image, image_data->upload_scale_mip_level);
1254 DecodedDrawImage decoded_draw_image(
1255 std::move(image), std::move(dark_mode_color_filter), SkSize(),
1256 scale_factor, CalculateDesiredFilterQuality(draw_image),
1257 image_data->is_budgeted);
1258 return decoded_draw_image;
// Releases the image ref taken by GetDecodedImageForDraw and, while the
// context lock is still held, runs any pending GPU-side cleanup.
1262 void GpuImageDecodeCache::DrawWithImageFinished(
1263 const DrawImage& draw_image,
1264 const DecodedDrawImage& decoded_draw_image) {
1265 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1266 "GpuImageDecodeCache::DrawWithImageFinished");
1268 // Release decoded_draw_image to ensure the referenced SkImage can be
1269 // cleaned up below.
1270 { auto delete_decoded_draw_image = std::move(decoded_draw_image); }
1272 // We are being called during raster. The context lock must already be
1273 // acquired by the caller.
1274 CheckContextLockAcquiredIfNecessary();
// Skipped images never took a ref in GetDecodedImageForDraw.
1276 if (SkipImage(draw_image))
1279 base::AutoLock lock(lock_);
1280 UnrefImageInternal(draw_image, InUseCacheKeyFromDrawImage(draw_image));
1282 // We are mid-draw and holding the context lock, ensure we clean up any
1283 // textures (especially at-raster), which may have just been marked for
1284 // deletion by UnrefImage.
1285 RunPendingContextThreadOperations();
// Best-effort shrink of the cache between task batches. Tries (but does not
// wait) to grab the context lock so pending GPU-side deletions can run;
// thread-safety analysis is disabled because the locking is conditional.
1288 void GpuImageDecodeCache::ReduceCacheUsage() NO_THREAD_SAFETY_ANALYSIS {
1289 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1290 "GpuImageDecodeCache::ReduceCacheUsage");
1291 base::AutoLock lock(lock_);
1294 // This is typically called when no tasks are running (between scheduling
1295 // tasks). Try to lock and run pending operations if possible, but don't
1298 // NO_THREAD_SAFETY_ANALYSIS: runtime-dependent locking.
// Bail out rather than block if another thread holds the context lock.
1299 if (context_->GetLock() && !context_->GetLock()->Try())
1302 RunPendingContextThreadOperations();
1303 if (context_->GetLock())
1304 context_->GetLock()->Release();
// Toggles aggressive freeing. When enabling, acquires the context lock (if
// any) so deleted images can actually be purged via
// RunPendingContextThreadOperations; when disabling, only the flag changes.
1307 void GpuImageDecodeCache::SetShouldAggressivelyFreeResources(
1308 bool aggressively_free_resources) {
1309 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1310 "GpuImageDecodeCache::SetShouldAggressivelyFreeResources",
1311 "agressive_free_resources", aggressively_free_resources);
1312 if (aggressively_free_resources) {
1313 absl::optional<viz::RasterContextProvider::ScopedRasterContextLock>
1315 if (context_->GetLock())
1316 context_lock.emplace(context_);
// |lock_| is scoped to the flag update; the context-thread cleanup below
// runs after it is released (but still under the context lock).
1318 base::AutoLock lock(lock_);
1319 aggressively_freeing_resources_ = aggressively_free_resources;
1322 // We are holding the context lock, so finish cleaning up deleted images
1324 RunPendingContextThreadOperations();
1326 base::AutoLock lock(lock_);
1327 aggressively_freeing_resources_ = aggressively_free_resources;
// Removes every entry from the persistent cache and clears per-paint-image
// bookkeeping. In-use entries are orphaned by RemoveFromPersistentCache.
1331 void GpuImageDecodeCache::ClearCache() {
1332 base::AutoLock lock(lock_);
1333 for (auto it = persistent_cache_.begin(); it != persistent_cache_.end();)
1334 it = RemoveFromPersistentCache(it);
1335 DCHECK(persistent_cache_.empty());
1336 paint_image_entries_.clear();
// Records working-set usage as a percentage of the budget. Uses CheckDiv to
// avoid dividing by a zero |max_working_set_bytes_|.
1339 void GpuImageDecodeCache::RecordStats() {
1340 base::AutoLock lock(lock_);
1342 if (working_set_bytes_ > 0 &&
1343 base::CheckDiv(static_cast<double>(working_set_bytes_),
1344 max_working_set_bytes_)
1345 .AssignIfValid(&cache_usage)) {
1346 UMA_HISTOGRAM_PERCENTAGE(
1347 "Renderer4.GpuImageDecodeState.CachePeakUsagePercent",
// Inserts |data| into the persistent cache keyed by the image's frame key,
// updating per-paint-image tracking first. Caller must hold |lock_|.
1352 void GpuImageDecodeCache::AddToPersistentCache(const DrawImage& draw_image,
1353 scoped_refptr<ImageData> data) {
1354 lock_.AssertAcquired();
1356 WillAddCacheEntry(draw_image);
1357 persistent_cache_.Put(draw_image.frame_key(), std::move(data));
// Erases one persistent-cache entry and returns the next iterator. Entries
// still holding refs are orphaned (deleted later when the refs drop);
// ref-free entries have their uploaded data deleted immediately.
// Caller must hold |lock_|.
1360 template <typename Iterator>
1361 Iterator GpuImageDecodeCache::RemoveFromPersistentCache(Iterator it) {
1362 lock_.AssertAcquired();
1364 if (it->second->decode.ref_count != 0 || it->second->upload.ref_count != 0) {
1365 // Orphan the image and erase it from the |persisent_cache_|. This ensures
1366 // that the image will be deleted once all refs are removed.
1367 it->second->is_orphaned = true;
1369 // Current entry has no refs. Ensure it is not locked.
1370 DCHECK(!it->second->decode.is_locked());
1371 DCHECK(!it->second->upload.is_locked());
1373 // Unlocked images must not be budgeted.
1374 DCHECK(!it->second->is_budgeted);
1376 // Free the uploaded image if it exists.
1377 if (it->second->HasUploadedData())
1378 DeleteImage(it->second.get());
// Update per-paint-image entry counts; drop tracking at zero.
1381 auto entries_it = paint_image_entries_.find(it->second->paint_image_id);
1382 DCHECK(entries_it != paint_image_entries_.end());
1383 DCHECK_GT(entries_it->second.count, 0u);
1385 // If this is the last entry for this image, remove its tracking.
1386 --entries_it->second.count;
1387 if (entries_it->second.count == 0u)
1388 paint_image_entries_.erase(entries_it);
1390 return persistent_cache_.Erase(it);
// Returns the working-set byte budget configured at construction.
1393 size_t GpuImageDecodeCache::GetMaximumMemoryLimitBytes() const {
1394 return max_working_set_bytes_;
// Emits a memory-infra allocator dump for a single GL texture, including a
// "locked_size" column, and links it to the cross-process GL texture GUID
// with high importance so this cache is attributed the memory.
1397 void GpuImageDecodeCache::AddTextureDump(
1398 base::trace_event::ProcessMemoryDump* pmd,
1399 const std::string& texture_dump_name,
1401 const GrGLuint gl_id,
1402 const size_t locked_size) const {
1403 using base::trace_event::MemoryAllocatorDump;
1404 using base::trace_event::MemoryAllocatorDumpGuid;
1406 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(texture_dump_name);
1407 dump->AddScalar(MemoryAllocatorDump::kNameSize,
1408 MemoryAllocatorDump::kUnitsBytes, bytes);
1410 // Dump the "locked_size" as an additional column.
1411 dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes, locked_size);
1413 MemoryAllocatorDumpGuid guid;
1414 guid = gl::GetGLTextureClientGUIDForTracing(
1415 context_->ContextSupport()->ShareGroupTracingGUID(), gl_id);
1416 pmd->CreateSharedGlobalAllocatorDump(guid);
1417 // Importance of 3 gives this dump priority over the dump made by Skia
1418 // (importance 2), attributing memory here.
1419 const int kImportance = 3;
1420 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
// Emits per-plane (Y/U/V) texture dumps for a YUV-decoded image. Each plane
// is dumped under |dump_base_name|/plane_<i> with its own texture size and
// GL id; locked_size is reported per plane when the image is locked.
1423 void GpuImageDecodeCache::MemoryDumpYUVImage(
1424 base::trace_event::ProcessMemoryDump* pmd,
1425 const ImageData* image_data,
1426 const std::string& dump_base_name,
1427 size_t locked_size) const {
1428 using base::trace_event::MemoryAllocatorDump;
1429 DCHECK(image_data->yuva_pixmap_info.has_value());
1430 DCHECK(image_data->upload.has_yuv_planes());
1432 struct PlaneMemoryDumpInfo {
1436 std::vector<PlaneMemoryDumpInfo> plane_dump_infos;
1437 // TODO(crbug.com/910276): Also include alpha plane if applicable.
1438 plane_dump_infos.push_back({image_data->upload.y_image()->textureSize(),
1439 image_data->upload.gl_y_id()});
1440 plane_dump_infos.push_back({image_data->upload.u_image()->textureSize(),
1441 image_data->upload.gl_u_id()});
1442 plane_dump_infos.push_back({image_data->upload.v_image()->textureSize(),
1443 image_data->upload.gl_v_id()});
1445 for (size_t i = 0u; i < plane_dump_infos.size(); ++i) {
1446 auto plane_dump_info = plane_dump_infos.at(i);
1447 // If the image is currently locked, we dump the locked size per plane.
1451 base::StringPrintf("/plane_%0u", base::checked_cast<uint32_t>(i)),
1452 plane_dump_info.byte_size, plane_dump_info.gl_id,
1453 locked_size ? plane_dump_info.byte_size : 0u);
// memory-infra dump provider hook. BACKGROUND dumps report only the total
// working-set size; detailed dumps walk the persistent cache and report
// discardable decode memory and GPU texture memory per image.
1457 bool GpuImageDecodeCache::OnMemoryDump(
1458 const base::trace_event::MemoryDumpArgs& args,
1459 base::trace_event::ProcessMemoryDump* pmd) {
1460 using base::trace_event::MemoryAllocatorDump;
1461 using base::trace_event::MemoryAllocatorDumpGuid;
1462 using base::trace_event::MemoryDumpLevelOfDetail;
1464 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1465 "GpuImageDecodeCache::OnMemoryDump");
1467 if (args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND) {
1468 std::string dump_name = base::StringPrintf(
1469 "cc/image_memory/cache_0x%" PRIXPTR, reinterpret_cast<uintptr_t>(this));
1470 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
1471 dump->AddScalar(MemoryAllocatorDump::kNameSize,
1472 MemoryAllocatorDump::kUnitsBytes, working_set_bytes_);
1474 // Early out, no need for more detail in a BACKGROUND dump.
1478 for (const auto& image_pair : persistent_cache_) {
1479 const ImageData* image_data = image_pair.second.get();
1480 int image_id = static_cast<int>(image_pair.first.hash());
1482 // If we have discardable decoded data, dump this here.
1483 if (image_data->decode.data()) {
1484 std::string discardable_dump_name = base::StringPrintf(
1485 "cc/image_memory/cache_0x%" PRIXPTR "/discardable/image_%d",
1486 reinterpret_cast<uintptr_t>(this), image_id);
1487 MemoryAllocatorDump* dump =
1488 image_data->decode.data()->CreateMemoryAllocatorDump(
1489 discardable_dump_name.c_str(), pmd);
1490 // Dump the "locked_size" as an additional column.
1491 // This lets us see the amount of discardable which is contributing to
1493 size_t locked_size =
1494 image_data->decode.is_locked() ? image_data->size : 0u;
1495 dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes,
1499 // If we have an uploaded image (that is actually on the GPU, not just a
1500 // CPU wrapper), upload it here.
1501 if (image_data->HasUploadedData() &&
1502 image_data->mode == DecodedDataMode::kGpu) {
1503 size_t discardable_size = image_data->size;
1504 auto* context_support = context_->ContextSupport();
1505 // If the discardable system has deleted this out from under us, log a
1506 // size of 0 to match software discardable.
// YUV images check all three plane textures; RGBA images check the
// single texture id.
1507 if (image_data->yuva_pixmap_info.has_value() &&
1508 context_support->ThreadsafeDiscardableTextureIsDeletedForTracing(
1509 image_data->upload.gl_y_id()) &&
1510 context_support->ThreadsafeDiscardableTextureIsDeletedForTracing(
1511 image_data->upload.gl_u_id()) &&
1512 context_support->ThreadsafeDiscardableTextureIsDeletedForTracing(
1513 image_data->upload.gl_v_id())) {
1514 discardable_size = 0;
1515 } else if (context_support
1516 ->ThreadsafeDiscardableTextureIsDeletedForTracing(
1517 image_data->upload.gl_id())) {
1518 discardable_size = 0;
1521 std::string gpu_dump_base_name = base::StringPrintf(
1522 "cc/image_memory/cache_0x%" PRIXPTR "/gpu/image_%d",
1523 reinterpret_cast<uintptr_t>(this), image_id);
1524 size_t locked_size =
1525 image_data->upload.is_locked() ? discardable_size : 0u;
1526 if (image_data->yuva_pixmap_info.has_value()) {
1527 MemoryDumpYUVImage(pmd, image_data, gpu_dump_base_name, locked_size);
1529 AddTextureDump(pmd, gpu_dump_base_name, discardable_size,
1530 image_data->upload.gl_id(), locked_size);
// Task-thread entry point for decode tasks: performs the decode (and dark
// mode filter generation) for an image that must already be budgeted.
1538 void GpuImageDecodeCache::DecodeImageInTask(const DrawImage& draw_image,
1539 TaskType task_type) {
1540 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1541 "GpuImageDecodeCache::DecodeImage");
1542 base::AutoLock lock(lock_);
1543 ImageData* image_data = GetImageDataForDrawImage(
1544 draw_image, InUseCacheKeyFromDrawImage(draw_image));
1546 DCHECK(image_data->is_budgeted) << "Must budget an image for pre-decoding";
1547 DecodeImageAndGenerateDarkModeFilterIfNecessary(draw_image, image_data,
// Task-thread entry point for upload tasks: acquires the context lock (and
// GrContext access when not using the transfer cache), then uploads the
// decoded image. Bitmap-backed images are decoded inline here since they
// have no separate decode task.
1551 void GpuImageDecodeCache::UploadImageInTask(const DrawImage& draw_image) {
1552 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1553 "GpuImageDecodeCache::UploadImage");
1554 absl::optional<viz::RasterContextProvider::ScopedRasterContextLock>
1556 if (context_->GetLock())
1557 context_lock.emplace(context_);
1559 absl::optional<ScopedGrContextAccess> gr_context_access;
1560 if (!use_transfer_cache_)
1561 gr_context_access.emplace(context_);
1562 base::AutoLock lock(lock_);
1564 auto cache_key = InUseCacheKeyFromDrawImage(draw_image);
1565 ImageData* image_data = GetImageDataForDrawImage(draw_image, cache_key);
1567 DCHECK(image_data->is_budgeted) << "Must budget an image for pre-decoding";
1569 if (image_data->is_bitmap_backed)
1570 DecodeImageAndGenerateDarkModeFilterIfNecessary(draw_image, image_data,
1571 TaskType::kInRaster);
1572 UploadImageIfNecessary(draw_image, image_data);
// Called when a decode task finishes (or is cancelled): clears the task
// pointer matching |task_type| and drops the decode ref the task held.
1575 void GpuImageDecodeCache::OnImageDecodeTaskCompleted(
1576 const DrawImage& draw_image,
1577 DecodeTaskType task_type) {
1578 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1579 "GpuImageDecodeCache::OnImageDecodeTaskCompleted");
1580 base::AutoLock lock(lock_);
1581 auto cache_key = InUseCacheKeyFromDrawImage(draw_image);
1582 // Decode task is complete, remove our reference to it.
1583 ImageData* image_data = GetImageDataForDrawImage(draw_image, cache_key);
1585 UMA_HISTOGRAM_BOOLEAN("Compositing.DecodeLCPCandidateImage.Hardware",
1586 draw_image.paint_image().may_be_lcp_candidate());
1587 if (task_type == DecodeTaskType::kPartOfUploadTask) {
1588 DCHECK(image_data->decode.task);
1589 image_data->decode.task = nullptr;
1591 DCHECK(task_type == DecodeTaskType::kStandAloneDecodeTask);
1592 DCHECK(image_data->decode.stand_alone_task);
1593 image_data->decode.stand_alone_task = nullptr;
1596 // While the decode task is active, we keep a ref on the decoded data.
1597 // Release that ref now.
1598 UnrefImageDecode(draw_image, cache_key);
// Called when an upload task finishes (or is cancelled): clears the task
// pointer and drops both refs the task held (decode data + image).
1601 void GpuImageDecodeCache::OnImageUploadTaskCompleted(
1602 const DrawImage& draw_image) {
1603 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1604 "GpuImageDecodeCache::OnImageUploadTaskCompleted");
1605 base::AutoLock lock(lock_);
1606 // Upload task is complete, remove our reference to it.
1607 InUseCacheKey cache_key = InUseCacheKeyFromDrawImage(draw_image);
1608 ImageData* image_data = GetImageDataForDrawImage(draw_image, cache_key);
1610 DCHECK(image_data->upload.task);
1611 image_data->upload.task = nullptr;
1613 // While the upload task is active, we keep a ref on both the image it will be
1614 // populating, as well as the decode it needs to populate it. Release these
1616 UnrefImageDecode(draw_image, cache_key);
1617 UnrefImageInternal(draw_image, cache_key);
// Computes the mip level at which |draw_image| should be uploaded, based on
// the draw-time scale. Clipped images are treated specially (scaling them
// would bleed color across the clip edge) unless the clipped-scaling switch
// is enabled.
1620 int GpuImageDecodeCache::CalculateUploadScaleMipLevel(
1621 const DrawImage& draw_image) const {
1622 // Images which are being clipped will have color-bleeding if scaled.
1623 // TODO(ericrk): Investigate uploading clipped images to handle this case and
1624 // provide further optimization. crbug.com/620899
1625 if (!enable_clipped_image_scaling_) {
1626 const bool is_clipped = draw_image.src_rect() !=
1627 SkIRect::MakeWH(draw_image.paint_image().width(),
1628 draw_image.paint_image().height());
1633 gfx::Size base_size(draw_image.paint_image().width(),
1634 draw_image.paint_image().height());
1635 // Ceil our scaled size so that the mip map generated is guaranteed to be
1636 // larger. Take the abs of the scale, as mipmap functions don't handle
1637 // (and aren't impacted by) negative image dimensions.
1638 gfx::Size scaled_size =
1639 gfx::ScaleToCeiledSize(base_size, std::abs(draw_image.scale().width()),
1640 std::abs(draw_image.scale().height()));
1642 return MipMapUtil::GetLevelForSize(base_size, scaled_size);
// Builds the in-use cache key: the draw image plus the mip level it will be
// uploaded at, so different scales of the same image get distinct entries.
1645 GpuImageDecodeCache::InUseCacheKey
1646 GpuImageDecodeCache::InUseCacheKeyFromDrawImage(
1647 const DrawImage& draw_image) const {
1648 return InUseCacheKey(draw_image, CalculateUploadScaleMipLevel(draw_image));
1651 // Checks if an image decode needs a decode task and returns it.
// Returns the decode task for |draw_image|, creating one (and taking the
// refs it needs) if none exists. Returns early (elided) when no decode is
// required: hardware-accelerated decodes, already-locked decodes, and
// bitmap-backed images.
1652 scoped_refptr<TileTask> GpuImageDecodeCache::GetImageDecodeTaskAndRef(
1653 const DrawImage& draw_image,
1654 const TracingInfo& tracing_info,
1655 DecodeTaskType task_type) {
1656 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1657 "GpuImageDecodeCache::GetImageDecodeTaskAndRef");
1658 lock_.AssertAcquired();
1660 auto cache_key = InUseCacheKeyFromDrawImage(draw_image);
1662 // This ref is kept alive while an upload task may need this decode. We
1663 // release this ref in UploadTaskCompleted.
1664 if (task_type == DecodeTaskType::kPartOfUploadTask)
1665 RefImageDecode(draw_image, cache_key);
1667 ImageData* image_data = GetImageDataForDrawImage(draw_image, cache_key);
1669 if (image_data->decode.do_hardware_accelerated_decode())
1672 // No decode is necessary for bitmap backed images.
1673 if (image_data->decode.is_locked() || image_data->is_bitmap_backed) {
1674 // We should never be creating a decode task for a not budgeted image.
1675 DCHECK(image_data->is_budgeted);
1676 // We should never be creating a decode for an already-uploaded image.
1677 DCHECK(!image_data->HasUploadedData());
1681 // We didn't have an existing locked image, create a task to lock or decode.
// Upload-driven and stand-alone decodes are tracked in separate slots.
1682 scoped_refptr<TileTask>& existing_task =
1683 (task_type == DecodeTaskType::kPartOfUploadTask)
1684 ? image_data->decode.task
1685 : image_data->decode.stand_alone_task;
1686 if (!existing_task) {
1687 // Ref image decode and create a decode task. This ref will be released in
1688 // DecodeTaskCompleted.
1689 RefImageDecode(draw_image, cache_key);
1690 existing_task = base::MakeRefCounted<GpuImageDecodeTaskImpl>(
1691 this, draw_image, tracing_info, task_type);
1693 return existing_task;
// Adds a ref to the decoded (CPU-side) data of an image that must already
// have an in-use cache entry. Notifies OwnershipChanged for bookkeeping.
1696 void GpuImageDecodeCache::RefImageDecode(const DrawImage& draw_image,
1697 const InUseCacheKey& cache_key) {
1698 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1699 "GpuImageDecodeCache::RefImageDecode");
1700 lock_.AssertAcquired();
1701 auto found = in_use_cache_.find(cache_key);
1702 DCHECK(found != in_use_cache_.end());
// Both the in-use entry and the underlying decode data are ref-counted.
1703 ++found->second.ref_count;
1704 ++found->second.image_data->decode.ref_count;
1705 OwnershipChanged(draw_image, found->second.image_data.get());
// Drops a decode-data ref. Removes the in-use cache entry entirely when its
// overall ref count reaches zero.
1708 void GpuImageDecodeCache::UnrefImageDecode(const DrawImage& draw_image,
1709 const InUseCacheKey& cache_key) {
1710 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1711 "GpuImageDecodeCache::UnrefImageDecode");
1712 lock_.AssertAcquired();
1713 auto found = in_use_cache_.find(cache_key);
1714 DCHECK(found != in_use_cache_.end());
1715 DCHECK_GT(found->second.image_data->decode.ref_count, 0u);
1716 DCHECK_GT(found->second.ref_count, 0u);
1717 --found->second.ref_count;
1718 --found->second.image_data->decode.ref_count;
// May unbudget or free the image now that a ref was dropped.
1719 OwnershipChanged(draw_image, found->second.image_data.get());
1720 if (found->second.ref_count == 0u) {
1721 in_use_cache_.erase(found);
// Adds an upload ref to an image, creating an in-use cache entry (backed by
// the persistent-cache ImageData) if one does not exist yet.
1725 void GpuImageDecodeCache::RefImage(const DrawImage& draw_image,
1726 const InUseCacheKey& cache_key) {
1727 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1728 "GpuImageDecodeCache::RefImage");
1729 lock_.AssertAcquired();
1730 auto found = in_use_cache_.find(cache_key);
1732 // If no secondary cache entry was found for the given |draw_image|, then
1733 // the draw_image only exists in the |persistent_cache_|. Create an in-use
1735 if (found == in_use_cache_.end()) {
// Peek avoids refreshing the entry's MRU position during a lookup.
1736 auto found_image = persistent_cache_.Peek(draw_image.frame_key());
1737 DCHECK(found_image != persistent_cache_.end());
1738 DCHECK(IsCompatible(found_image->second.get(), draw_image));
1739 found = in_use_cache_
1740 .insert(InUseCache::value_type(
1741 cache_key, InUseCacheEntry(found_image->second)))
1745 DCHECK(found != in_use_cache_.end());
1746 ++found->second.ref_count;
1747 ++found->second.image_data->upload.ref_count;
1748 OwnershipChanged(draw_image, found->second.image_data.get());
// Releases one upload (image) reference for |draw_image|. Mirrors RefImage():
// decrements the in-use entry ref and the ImageData upload ref, runs
// OwnershipChanged(), and erases the in-use entry when its ref count hits
// zero. Caller must hold |lock_|.
1751 void GpuImageDecodeCache::UnrefImageInternal(const DrawImage& draw_image,
1752 const InUseCacheKey& cache_key) {
1753 lock_.AssertAcquired();
1754 auto found = in_use_cache_.find(cache_key);
1755 DCHECK(found != in_use_cache_.end());
// Underflow guards: both counts must be positive before decrementing.
1756 DCHECK_GT(found->second.image_data->upload.ref_count, 0u);
1757 DCHECK_GT(found->second.ref_count, 0u);
1758 --found->second.ref_count;
1759 --found->second.image_data->upload.ref_count;
1760 OwnershipChanged(draw_image, found->second.image_data.get());
1761 if (found->second.ref_count == 0u) {
1762 in_use_cache_.erase(found);
1766 // Called any time an image or decode ref count changes. Takes care of any
1767 // necessary memory budget book-keeping and cleanup.
1768 void GpuImageDecodeCache::OwnershipChanged(const DrawImage& draw_image,
1769 ImageData* image_data) {
1770 lock_.AssertAcquired();
// NOTE(review): the declaration line (`bool has_any_refs =`) is missing from
// this paste; the next line is its continuation. Verify against upstream.
1773 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0;
1774 // If we have no image refs on an image, we should unbudget it.
1775 if (!has_any_refs && image_data->is_budgeted) {
1776 DCHECK_GE(working_set_bytes_, image_data->size);
1777 DCHECK_GE(working_set_items_, 1u);
1778 working_set_bytes_ -= image_data->size;
1779 working_set_items_ -= 1;
1780 image_data->is_budgeted = false;
1783 // Don't keep around completely empty images. This can happen if an image's
1784 // decode/upload tasks were both cancelled before completing.
1785 const bool has_cpu_data =
1786 image_data->decode.data() ||
1787 (image_data->is_bitmap_backed && image_data->decode.image());
1788 if (!has_any_refs && !image_data->HasUploadedData() && !has_cpu_data &&
1789 !image_data->is_orphaned) {
1790 auto found_persistent = persistent_cache_.Peek(draw_image.frame_key());
1791 if (found_persistent != persistent_cache_.end())
1792 RemoveFromPersistentCache(found_persistent);
1795 // Don't keep discardable cpu memory for GPU backed images. The cache hit rate
1796 // of the cpu fallback (in case we don't find this image in gpu memory) is
1797 // too low to cache this data.
1798 if (image_data->decode.ref_count == 0 &&
1799 image_data->mode != DecodedDataMode::kCpu &&
1800 image_data->HasUploadedData()) {
1801 image_data->decode.ResetData();
1804 // If we have no refs on an uploaded image, it should be unlocked. Do this
1805 // before any attempts to delete the image.
1806 if (image_data->IsGpuOrTransferCache() && image_data->upload.ref_count == 0 &&
1807 image_data->upload.is_locked()) {
1808 UnlockImage(image_data);
1811 // Don't keep around orphaned images.
1812 if (image_data->is_orphaned && !has_any_refs) {
1813 DeleteImage(image_data);
1816 // Don't keep CPU images if they are unused, these images can be recreated by
1817 // re-locking discardable (rather than requiring a full upload like GPU
1819 if (image_data->mode == DecodedDataMode::kCpu && !has_any_refs) {
1820 DeleteImage(image_data);
1823 // If we have image that could be budgeted, but isn't, budget it now.
1824 if (has_any_refs && !image_data->is_budgeted &&
1825 CanFitInWorkingSet(image_data->size)) {
1826 working_set_bytes_ += image_data->size;
1827 working_set_items_ += 1;
1828 image_data->is_budgeted = true;
1831 // We should unlock the decoded image memory for the image in two cases:
1832 // 1) The image is no longer being used (no decode or upload refs).
1833 // 2) This is a non-CPU image that has already been uploaded and we have
1834 // no remaining decode refs.
1835 bool should_unlock_decode = !has_any_refs || (image_data->HasUploadedData() &&
1836 !image_data->decode.ref_count);
1838 if (should_unlock_decode && image_data->decode.is_locked()) {
// Bitmap-backed images hold an SkImage rather than discardable memory, so
// they are released via ResetBitmapImage() instead of Unlock().
1839 if (image_data->is_bitmap_backed) {
1840 DCHECK(!image_data->decode.data());
1841 image_data->decode.ResetBitmapImage();
1843 DCHECK(image_data->decode.data());
1844 image_data->decode.Unlock();
1848 // EnsureCapacity to make sure we are under our cache limits.
1852 // Sanity check the above logic.
1853 if (image_data->HasUploadedData()) {
// kCpu "uploaded" data aliases the decoded discardable memory, so it must
// still be locked whenever uploaded data is present.
1854 if (image_data->mode == DecodedDataMode::kCpu)
1855 DCHECK(image_data->decode.is_locked());
1857 DCHECK(!image_data->is_budgeted || has_any_refs);
1862 // Checks whether we can fit a new image of size |required_size| in our
1863 // working set. Also frees unreferenced entries to keep us below our preferred
1865 bool GpuImageDecodeCache::EnsureCapacity(size_t required_size) {
1866 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1867 "GpuImageDecodeCache::EnsureCapacity");
1868 lock_.AssertAcquired();
1870 // While we are over preferred item capacity, we iterate through our set of
1871 // cached image data in LRU order, removing unreferenced images.
1872 for (auto it = persistent_cache_.rbegin();
1873 it != persistent_cache_.rend() && ExceedsPreferredCount();) {
// Entries with outstanding decode/upload refs are skipped, not evicted.
1874 if (it->second->decode.ref_count != 0 ||
1875 it->second->upload.ref_count != 0) {
// RemoveFromPersistentCache() returns the next iterator, keeping the loop
// valid across erasure.
1880 it = RemoveFromPersistentCache(it);
// Eviction may not have freed enough; report whether the new image fits.
1883 return CanFitInWorkingSet(required_size);
// Returns whether an additional |size| bytes fits within the working-set
// item and byte budgets. Uses CheckedNumeric so the byte total cannot
// silently overflow. Caller must hold |lock_|.
// NOTE(review): this paste drops the `return` lines and the `new_size += size;`
// accumulation between the visible lines — verify against upstream.
1886 bool GpuImageDecodeCache::CanFitInWorkingSet(size_t size) const {
1887 lock_.AssertAcquired();
1889 if (working_set_items_ >= max_working_set_items_)
1892 base::CheckedNumeric<uint32_t> new_size(working_set_bytes_);
1894 if (!new_size.IsValid() || new_size.ValueOrDie() > max_working_set_bytes_)
// Returns whether the persistent cache holds more entries than preferred.
// A tighter limit applies while aggressively freeing resources (e.g. when
// the process is backgrounded). Caller must hold |lock_|.
1900 bool GpuImageDecodeCache::ExceedsPreferredCount() const {
1901 lock_.AssertAcquired();
// NOTE(review): the |items_limit| declaration line is missing from this
// paste; these branches assign it before the comparison below.
1904 if (aggressively_freeing_resources_) {
1905 items_limit = kSuspendedMaxItemsInCacheForGpu;
1907 items_limit = kNormalMaxItemsInCacheForGpu;
1910 return persistent_cache_.size() > items_limit;
// Serializes |image_entry| into mapped transfer-cache memory and creates the
// service-side entry. On success records the transfer cache id on
// |image_data|; on failure (lost context / shared memory exhaustion) marks
// the decode as failed so we do not retry.
1913 void GpuImageDecodeCache::InsertTransferCacheEntry(
1914 const ClientImageTransferCacheEntry& image_entry,
1915 ImageData* image_data) {
1917 uint32_t size = image_entry.SerializedSize();
// MapTransferCacheEntry() can return null (e.g. lost context); the failure
// branch below handles that case.
1918 void* data = context_->ContextSupport()->MapTransferCacheEntry(size);
1920 bool succeeded = image_entry.Serialize(
1921 base::make_span(reinterpret_cast<uint8_t*>(data), size));
1923 context_->ContextSupport()->UnmapAndCreateTransferCacheEntry(
1924 image_entry.UnsafeType(), image_entry.Id());
1925 image_data->upload.SetTransferCacheId(image_entry.Id());
1927 // Transfer cache entry can fail due to a lost gpu context or failure
1928 // to allocate shared memory. Handle this gracefully. Mark this
1929 // image as "decode failed" so that we do not try to handle it again.
1930 // If this was a lost context, we'll recreate this image decode cache.
1931 image_data->decode.decode_failure = true;
// Returns whether a dark-mode color filter still needs to be generated for
// |draw_image|'s src_rect: the image must request dark mode, not be YUV
// (unsupported), and not already have a cached filter for that rect.
// NOTE(review): the `return` lines are missing from this paste.
1935 bool GpuImageDecodeCache::NeedsDarkModeFilter(const DrawImage& draw_image,
1936 ImageData* image_data) {
1939 // |draw_image| does not need dark mode to be applied.
1940 if (!draw_image.use_dark_mode())
1943 // |dark_mode_filter_| must be valid, if |draw_image| has use_dark_mode set.
1944 DCHECK(dark_mode_filter_);
1946 // TODO(prashant.n): RSDM - Add support for YUV decoded data.
1947 if (image_data->yuva_pixmap_info.has_value())
1950 // Dark mode filter is already generated and cached.
1951 if (image_data->decode.dark_mode_color_filter_cache.find(
1952 draw_image.src_rect()) !=
1953 image_data->decode.dark_mode_color_filter_cache.end())
// Decodes |draw_image| if needed and, when dark mode applies, generates and
// caches the dark-mode filter for its src_rect. The dark-mode check is done
// first because needing a filter can force a (re)decode even when uploaded
// data already exists. Caller must hold |lock_|.
1959 void GpuImageDecodeCache::DecodeImageAndGenerateDarkModeFilterIfNecessary(
1960 const DrawImage& draw_image,
1961 ImageData* image_data,
1962 TaskType task_type) {
1963 lock_.AssertAcquired();
1965 // Check if image needs dark mode to be applied, based on this image may be
1966 // decoded again if decoded data is not available.
1967 bool needs_dark_mode_filter = NeedsDarkModeFilter(draw_image, image_data);
1968 DecodeImageIfNecessary(draw_image, image_data, task_type,
1969 needs_dark_mode_filter);
1970 if (needs_dark_mode_filter)
1971 GenerateDarkModeFilter(draw_image, image_data);
// Produces locked decoded (CPU-side) data for |draw_image| unless it is
// already available or unnecessary. Handles several early-outs (hardware
// decode, prior failure, already-uploaded data, bitmap-backed images, or an
// existing lockable decode) before doing the actual decode, which runs with
// |lock_| temporarily released. Caller must hold |lock_|.
// NOTE(review): several `return`/closing-brace lines are missing from this
// paste — the early-out branches below originally returned.
1974 void GpuImageDecodeCache::DecodeImageIfNecessary(
1975 const DrawImage& draw_image,
1976 ImageData* image_data,
1978 bool needs_decode_for_dark_mode) {
1979 lock_.AssertAcquired();
1981 DCHECK_GT(image_data->decode.ref_count, 0u);
1983 if (image_data->decode.do_hardware_accelerated_decode()) {
1984 // We get here in the case of an at-raster decode.
1988 if (image_data->decode.decode_failure) {
1989 // We have already tried and failed to decode this image. Don't try again.
1993 if (image_data->HasUploadedData() &&
1994 TryLockImage(HaveContextLock::kNo, draw_image, image_data) &&
1995 !needs_decode_for_dark_mode) {
1996 // We already have an uploaded image and we don't need a decode for dark
1997 // mode too, so no reason to decode.
// Bitmap-backed (non-lazy, unscaled) images need no real decode; just adopt
// the software SkImage directly.
2001 if (image_data->is_bitmap_backed) {
2002 DCHECK(!draw_image.paint_image().IsLazyGenerated());
2003 if (image_data->yuva_pixmap_info.has_value()) {
2004 DLOG(ERROR) << "YUV + Bitmap is unknown and unimplemented!";
2007 image_data->decode.SetBitmapImage(
2008 draw_image.paint_image().GetSwSkImage());
2013 if (image_data->decode.data() &&
2014 (image_data->decode.is_locked() || image_data->decode.Lock())) {
2015 // We already decoded this, or we just needed to lock, early out.
2019 TRACE_EVENT0("cc,benchmark", "GpuImageDecodeCache::DecodeImage");
2021 image_data->decode.ResetData();
2022 std::unique_ptr<base::DiscardableMemory> backing_memory;
2023 sk_sp<SkImage> image;
2024 // These are used only for decoding into YUV.
2025 sk_sp<SkImage> image_y;
2026 sk_sp<SkImage> image_u;
2027 sk_sp<SkImage> image_v;
// The decode itself runs without |lock_| held so other threads can use the
// cache; results are re-validated after the lock is reacquired below.
2029 base::AutoUnlock unlock(lock_);
2030 if (base::FeatureList::IsEnabled(
2031 features::kNoDiscardableMemoryForGpuDecodePath)) {
2033 std::make_unique<HeapDiscardableMemory>(image_data->size);
2035 auto* allocator = base::DiscardableMemoryAllocator::GetInstance();
// On allocation pressure, retry after clearing this cache; die if that fails.
2036 backing_memory = allocator->AllocateLockedDiscardableMemoryWithRetryOrDie(
2037 image_data->size, base::BindOnce(&GpuImageDecodeCache::ClearCache,
2038 base::Unretained(this)));
2041 sk_sp<SkColorSpace> color_space =
2042 ColorSpaceForImageDecode(draw_image, image_data->mode);
// No-op release proc: the pixmap memory is owned by |backing_memory|, not
// by the SkImages created from it.
2043 auto release_proc = [](const void*, void*) {};
2044 SkImageInfo image_info = CreateImageInfoForDrawImage(
2045 draw_image, image_data->upload_scale_mip_level);
2046 SkPixmap pixmap(image_info, backing_memory->data(),
2047 image_info.minRowBytes());
2049 // Set |pixmap| to the desired colorspace to decode into.
2050 pixmap.setColorSpace(color_space);
2052 if (image_data->yuva_pixmap_info.has_value()) {
2053 DVLOG(3) << "GpuImageDecodeCache wants to do YUV decoding/rendering";
2057 if (!DrawAndScaleImage(draw_image, &pixmap, generator_client_id_, true,
2058 yuva_supported_data_types_,
2059 image_data->yuva_pixmap_info->dataType(),
2060 &pixmap_y, &pixmap_u, &pixmap_v)) {
2061 DLOG(ERROR) << "DrawAndScaleImage failed.";
// Failed decode: drop the backing memory so the failure branch below runs.
2062 backing_memory->Unlock();
2063 backing_memory.reset();
2065 image_y = SkImage::MakeFromRaster(pixmap_y, release_proc, nullptr);
2066 image_u = SkImage::MakeFromRaster(pixmap_u, release_proc, nullptr);
2067 image_v = SkImage::MakeFromRaster(pixmap_v, release_proc, nullptr);
2069 } else { // RGBX decoding is the default path.
2070 if (!DrawAndScaleImage(draw_image, &pixmap, generator_client_id_, false,
2071 yuva_supported_data_types_)) {
2072 DLOG(ERROR) << "DrawAndScaleImage failed.";
2073 backing_memory->Unlock();
2074 backing_memory.reset();
2076 image = SkImage::MakeFromRaster(pixmap, release_proc, nullptr);
// Back under |lock_| here: re-check for races that happened while unlocked.
2081 if (image_data->decode.data()) {
2082 // An at-raster task decoded this before us. Ignore our decode.
2083 if (image_data->yuva_pixmap_info.has_value()) {
2084 DCHECK(image_data->decode.y_image());
2085 DCHECK(image_data->decode.u_image());
2086 DCHECK(image_data->decode.v_image());
2088 DCHECK(image_data->decode.image());
2093 if (!backing_memory) {
2098 // If |backing_memory| was not populated, we had a non-decodable image.
2099 image_data->decode.decode_failure = true;
// Success: hand the locked memory (and plane images for YUV) to the decode
// data, noting whether this was a standalone (out-of-raster) decode.
2103 if (image_data->yuva_pixmap_info.has_value()) {
2104 image_data->decode.SetLockedData(
2105 std::move(backing_memory), std::move(image_y), std::move(image_u),
2106 std::move(image_v), task_type == TaskType::kOutOfRaster);
2108 image_data->decode.SetLockedData(std::move(backing_memory),
2110 task_type == TaskType::kOutOfRaster);
// Generates the dark-mode color filter for |draw_image|'s src_rect from the
// decoded pixels and caches it on |image_data|. Requires decoded RGBA data
// (NeedsDarkModeFilter() rejects YUV) and a valid |dark_mode_filter_|.
2114 void GpuImageDecodeCache::GenerateDarkModeFilter(const DrawImage& draw_image,
2115 ImageData* image_data) {
2116 DCHECK(dark_mode_filter_);
2117 // Caller must ensure draw image needs dark mode to be applied.
2118 DCHECK(NeedsDarkModeFilter(draw_image, image_data));
2119 // Caller must ensure image is valid and has decoded data.
2120 DCHECK(image_data->decode.image());
2122 // TODO(prashant.n): Calling ApplyToImage() from |dark_mode_filter_| can be
2123 // expensive. Check the possibilitiy of holding |lock_| only for accessing and
2124 // storing dark mode result on |image_data|.
2125 lock_.AssertAcquired();
// Skip if the decode failed; there are no pixels to analyze.
2127 if (image_data->decode.decode_failure)
2131 image_data->decode.image()->peekPixels(&pixmap);
// Cache the generated filter keyed by src_rect for reuse on later draws.
2132 image_data->decode.dark_mode_color_filter_cache[draw_image.src_rect()] =
2133 dark_mode_filter_->ApplyToImage(pixmap, draw_image.src_rect());
// Uploads decoded data for |draw_image| to the GPU / transfer cache if it is
// not already uploaded and locked. Computes target color-space parameters,
// then dispatches to the mode-specific helper (transfer-cache hardware
// decode, transfer-cache YUVA/RGBA software decode, or Gpu/Cpu YUVA/RGBA).
// Requires both the context lock (where applicable) and |lock_|.
2136 void GpuImageDecodeCache::UploadImageIfNecessary(const DrawImage& draw_image,
2137 ImageData* image_data) {
2138 CheckContextLockAcquiredIfNecessary();
2139 lock_.AssertAcquired();
2141 // We are about to upload a new image and are holding the context lock.
2142 // Ensure that any images which have been marked for deletion are actually
2143 // cleaned up so we don't exceed our memory limit during this upload.
2144 RunPendingContextThreadOperations();
2146 if (image_data->decode.decode_failure) {
2147 // We were unable to decode this image. Don't try to upload.
2151 // If an upload already exists, try to lock it. If this fails, it will clear
2152 // any uploaded data.
2153 if (image_data->HasUploadedData())
2154 TryLockImage(HaveContextLock::kYes, draw_image, image_data);
2156 // Ensure the mip status is correct before returning the locked upload or
2157 // preparing to upload a new image.
2158 UpdateMipsIfNeeded(draw_image, image_data);
2160 // If we have uploaded data at this point, it is locked with correct mips,
2162 if (image_data->HasUploadedData())
2165 TRACE_EVENT0("cc", "GpuImageDecodeCache::UploadImage");
2166 if (!image_data->decode.do_hardware_accelerated_decode()) {
2167 // These are not needed for accelerated decodes because there was no decode
2169 DCHECK(image_data->decode.is_locked());
2170 image_data->decode.mark_used();
2172 DCHECK_GT(image_data->decode.ref_count, 0u);
2173 DCHECK_GT(image_data->upload.ref_count, 0u);
// Target color space for upload-time conversion; null when conversion is
// unsupported or the draw image's target space is invalid.
2175 sk_sp<SkColorSpace> target_color_space =
2176 SupportsColorSpaceConversion() &&
2177 draw_image.target_color_space().IsValid()
2178 ? draw_image.target_color_space().ToSkColorSpace()
2180 // The value of |decoded_target_colorspace| takes into account the fact
2181 // that we might need to ignore an embedded image color space if |color_type_|
2182 // does not support color space conversions or that color conversion might
2183 // have happened at decode time.
2184 sk_sp<SkColorSpace> decoded_target_colorspace =
2185 ColorSpaceForImageDecode(draw_image, image_data->mode);
2186 if (target_color_space && decoded_target_colorspace) {
// If the decode already produced the target space (and no tone mapping is
// involved), skip the redundant upload-time conversion.
2187 if (!gfx::ColorSpace(*decoded_target_colorspace).IsToneMappedByDefault() &&
2188 SkColorSpace::Equals(target_color_space.get(),
2189 decoded_target_colorspace.get())) {
2190 target_color_space = nullptr;
2194 absl::optional<TargetColorParams> target_color_params;
2195 if (target_color_space) {
2196 target_color_params = draw_image.target_color_params();
2197 target_color_params->color_space = gfx::ColorSpace(*target_color_space);
// Dispatch to the appropriate upload path for this image's mode/layout.
2200 if (image_data->mode == DecodedDataMode::kTransferCache) {
2201 DCHECK(use_transfer_cache_);
2202 if (image_data->decode.do_hardware_accelerated_decode()) {
2203 UploadImageIfNecessary_TransferCache_HardwareDecode(
2204 draw_image, image_data, target_color_space);
2205 } else if (image_data->yuva_pixmap_info.has_value()) {
2206 const bool needs_tone_mapping =
2207 decoded_target_colorspace &&
2208 gfx::ColorSpace(*decoded_target_colorspace).IsToneMappedByDefault();
2209 UploadImageIfNecessary_TransferCache_SoftwareDecode_YUVA(
2210 draw_image, image_data, decoded_target_colorspace,
2211 needs_tone_mapping ? target_color_params : absl::nullopt);
2213 UploadImageIfNecessary_TransferCache_SoftwareDecode_RGBA(
2214 draw_image, image_data, target_color_params);
2217 // Grab a reference to our decoded image. For the kCpu path, we will use
2218 // this directly as our "uploaded" data.
2219 sk_sp<SkImage> uploaded_image = image_data->decode.image();
2220 GrMipMapped image_needs_mips =
2221 image_data->needs_mips ? GrMipMapped::kYes : GrMipMapped::kNo;
2223 if (image_data->yuva_pixmap_info.has_value()) {
2224 UploadImageIfNecessary_GpuCpu_YUVA(
2225 draw_image, image_data, uploaded_image, image_needs_mips,
2226 decoded_target_colorspace, target_color_space);
2228 UploadImageIfNecessary_GpuCpu_RGBA(draw_image, image_data, uploaded_image,
2229 image_needs_mips, target_color_space);
// Transfer-cache upload path for hardware-accelerated decodes: sends the
// original encoded bytes to the GPU process via ScheduleImageDecode(), which
// decodes directly into a transfer cache entry. No scaling is supported on
// this path (mip level must be 0).
2234 void GpuImageDecodeCache::UploadImageIfNecessary_TransferCache_HardwareDecode(
2235 const DrawImage& draw_image,
2236 ImageData* image_data,
2237 sk_sp<SkColorSpace> color_space) {
2238 DCHECK_EQ(image_data->mode, DecodedDataMode::kTransferCache);
2239 DCHECK(use_transfer_cache_);
2240 DCHECK(image_data->decode.do_hardware_accelerated_decode());
2242 // The assumption is that scaling is not currently supported for
2243 // hardware-accelerated decodes.
2244 DCHECK_EQ(0, image_data->upload_scale_mip_level);
2245 const gfx::Size output_size(draw_image.paint_image().width(),
2246 draw_image.paint_image().height());
2248 // Get the encoded data in a contiguous form.
2249 sk_sp<SkData> encoded_data =
2250 draw_image.paint_image().GetSwSkImage()->refEncodedData();
2251 DCHECK(encoded_data);
2252 const uint32_t transfer_cache_id = ClientImageTransferCacheEntry::GetNextId();
2253 const gpu::SyncToken decode_sync_token =
2254 context_->RasterInterface()->ScheduleImageDecode(
2255 base::make_span(encoded_data->bytes(), encoded_data->size()),
2256 output_size, transfer_cache_id,
2257 color_space ? gfx::ColorSpace(*color_space) : gfx::ColorSpace(),
2258 image_data->needs_mips);
// An empty sync token means the decode could not be scheduled; mark the
// image failed so we never retry it.
2260 if (!decode_sync_token.HasData()) {
2261 image_data->decode.decode_failure = true;
2265 image_data->upload.SetTransferCacheId(transfer_cache_id);
2267 // Note that we wait for the decode sync token here for two reasons:
2269 // 1) To make sure that raster work that depends on the image decode
2270 // happens after the decode completes.
2272 // 2) To protect the transfer cache entry from being unlocked on the
2273 // service side before the decode is completed.
2274 context_->RasterInterface()->WaitSyncTokenCHROMIUM(
2275 decode_sync_token.GetConstData());
// Transfer-cache upload path for software-decoded YUVA images: wraps the
// three decoded plane pixmaps into a ClientImageTransferCacheEntry and
// serializes it into the transfer cache. Bails out silently if any plane's
// pixels cannot be peeked or the entry is invalid.
2278 void GpuImageDecodeCache::
2279 UploadImageIfNecessary_TransferCache_SoftwareDecode_YUVA(
2280 const DrawImage& draw_image,
2281 ImageData* image_data,
2282 sk_sp<SkColorSpace> decoded_target_colorspace,
2283 absl::optional<TargetColorParams> target_color_params) {
2284 DCHECK_EQ(image_data->mode, DecodedDataMode::kTransferCache);
2285 DCHECK(use_transfer_cache_);
2286 DCHECK(!image_data->decode.do_hardware_accelerated_decode());
2287 DCHECK(image_data->yuva_pixmap_info.has_value());
// Planes are Y, U, V in order; peekPixels() fails if a plane has no
// CPU-accessible pixels.
2289 SkPixmap yuv_pixmaps[3];
2290 if (!image_data->decode.y_image()->peekPixels(&yuv_pixmaps[0]) ||
2291 !image_data->decode.u_image()->peekPixels(&yuv_pixmaps[1]) ||
2292 !image_data->decode.v_image()->peekPixels(&yuv_pixmaps[2])) {
2295 ClientImageTransferCacheEntry image_entry(
2296 yuv_pixmaps, image_data->yuva_pixmap_info->yuvaInfo().planeConfig(),
2297 image_data->yuva_pixmap_info->yuvaInfo().subsampling(),
2298 decoded_target_colorspace.get(),
2299 image_data->yuva_pixmap_info->yuvaInfo().yuvColorSpace(),
2300 image_data->needs_mips, target_color_params);
2301 if (!image_entry.IsValid())
2303 InsertTransferCacheEntry(image_entry, image_data);
// Transfer-cache upload path for software-decoded RGBA images: wraps the
// decoded pixmap into a ClientImageTransferCacheEntry and serializes it into
// the transfer cache. Bails out silently on unpeekable pixels or an invalid
// entry.
2306 void GpuImageDecodeCache::
2307 UploadImageIfNecessary_TransferCache_SoftwareDecode_RGBA(
2308 const DrawImage& draw_image,
2309 ImageData* image_data,
2310 absl::optional<TargetColorParams> target_color_params) {
2311 DCHECK_EQ(image_data->mode, DecodedDataMode::kTransferCache);
2312 DCHECK(use_transfer_cache_);
2313 DCHECK(!image_data->decode.do_hardware_accelerated_decode());
2314 DCHECK(!image_data->yuva_pixmap_info.has_value());
2317 if (!image_data->decode.image()->peekPixels(&pixmap))
2320 ClientImageTransferCacheEntry image_entry(&pixmap, image_data->needs_mips,
2321 target_color_params);
2322 if (!image_entry.IsValid())
2324 InsertTransferCacheEntry(image_entry, image_data);
// Gpu/Cpu upload path for YUVA images. For kGpu, uploads each plane as a
// texture (with |lock_| released) and composes them into a single YUVA
// image; for kCpu, the raster-backed plane images are used directly. On
// success the plane images are handed to the GPU discardable system.
2327 void GpuImageDecodeCache::UploadImageIfNecessary_GpuCpu_YUVA(
2328 const DrawImage& draw_image,
2329 ImageData* image_data,
2330 sk_sp<SkImage> uploaded_image,
2331 GrMipMapped image_needs_mips,
2332 sk_sp<SkColorSpace> decoded_target_colorspace,
2333 sk_sp<SkColorSpace> color_space) {
2334 DCHECK(!use_transfer_cache_);
2335 DCHECK(image_data->yuva_pixmap_info.has_value());
2337 // Grab a reference to our decoded image. For the kCpu path, we will use
2338 // this directly as our "uploaded" data.
2339 sk_sp<SkImage> uploaded_y_image = image_data->decode.y_image();
2340 sk_sp<SkImage> uploaded_u_image = image_data->decode.u_image();
2341 sk_sp<SkImage> uploaded_v_image = image_data->decode.v_image();
2343 // For kGpu, we upload and color convert (if necessary).
2344 if (image_data->mode == DecodedDataMode::kGpu) {
2345 DCHECK(!use_transfer_cache_);
// Texture upload may block; drop |lock_| while talking to the GPU.
2346 base::AutoUnlock unlock(lock_);
2347 uploaded_y_image = uploaded_y_image->makeTextureImage(context_->GrContext(),
2349 uploaded_u_image = uploaded_u_image->makeTextureImage(context_->GrContext(),
2351 uploaded_v_image = uploaded_v_image->makeTextureImage(context_->GrContext(),
2353 if (!uploaded_y_image || !uploaded_u_image || !uploaded_v_image) {
2354 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
2358 int image_width = uploaded_y_image->width();
2359 int image_height = uploaded_y_image->height();
2360 uploaded_image = CreateImageFromYUVATexturesInternal(
2361 uploaded_y_image.get(), uploaded_u_image.get(), uploaded_v_image.get(),
2362 image_width, image_height,
2363 image_data->yuva_pixmap_info->yuvaInfo().planeConfig(),
2364 image_data->yuva_pixmap_info->yuvaInfo().subsampling(),
2365 image_data->yuva_pixmap_info->yuvaInfo().yuvColorSpace(), color_space,
2366 decoded_target_colorspace);
2369 // At-raster may have decoded this while we were unlocked. If so, ignore our
2371 if (image_data->HasUploadedData()) {
// Another thread won the race; discard the plane textures we just created.
2372 if (uploaded_image) {
2373 DCHECK(uploaded_y_image);
2374 DCHECK(uploaded_u_image);
2375 DCHECK(uploaded_v_image);
2376 // We do not call DeleteSkImageAndPreventCaching for |uploaded_image|
2377 // because calls to getBackendTexture will flatten the YUV planes to
2378 // an RGB texture only to immediately delete it.
2379 DeleteSkImageAndPreventCaching(context_, std::move(uploaded_y_image));
2380 DeleteSkImageAndPreventCaching(context_, std::move(uploaded_u_image));
2381 DeleteSkImageAndPreventCaching(context_, std::move(uploaded_v_image));
2386 // TODO(crbug.com/740737): |uploaded_image| is sometimes null in certain
2387 // context-lost situations, so it is handled with an early out.
2388 if (!uploaded_image || !uploaded_y_image || !uploaded_u_image ||
2389 !uploaded_v_image) {
2390 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
// Take explicit ownership of each plane's texture so the discardable
// system can manage it.
2394 uploaded_y_image = TakeOwnershipOfSkImageBacking(context_->GrContext(),
2395 std::move(uploaded_y_image));
2396 uploaded_u_image = TakeOwnershipOfSkImageBacking(context_->GrContext(),
2397 std::move(uploaded_u_image));
2398 uploaded_v_image = TakeOwnershipOfSkImageBacking(context_->GrContext(),
2399 std::move(uploaded_v_image));
2401 image_data->upload.SetImage(std::move(uploaded_image),
2402 image_data->yuva_pixmap_info.has_value());
2403 image_data->upload.SetYuvImage(std::move(uploaded_y_image),
2404 std::move(uploaded_u_image),
2405 std::move(uploaded_v_image));
2407 // If we have a new GPU-backed image, initialize it for use in the GPU
2408 // discardable system.
2409 if (image_data->mode == DecodedDataMode::kGpu) {
2410 // Notify the discardable system of the planes so they will count against
2412 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
2413 image_data->upload.gl_y_id());
2414 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
2415 image_data->upload.gl_u_id());
2416 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
2417 image_data->upload.gl_v_id());
// Gpu/Cpu upload path for RGBA images. For kGpu, uploads the decoded image
// as a texture (with |lock_| released) and registers it with the GPU
// discardable system; for kCpu, the raster-backed image is stored directly
// as the "uploaded" data.
2421 void GpuImageDecodeCache::UploadImageIfNecessary_GpuCpu_RGBA(
2422 const DrawImage& draw_image,
2423 ImageData* image_data,
2424 sk_sp<SkImage> uploaded_image,
2425 GrMipMapped image_needs_mips,
2426 sk_sp<SkColorSpace> color_space) {
2427 DCHECK(!use_transfer_cache_);
2428 DCHECK(!image_data->yuva_pixmap_info.has_value());
2430 // RGBX decoding is below.
2431 // For kGpu, we upload and color convert (if necessary).
2432 if (image_data->mode == DecodedDataMode::kGpu) {
2433 DCHECK(!use_transfer_cache_);
// Texture upload may block; drop |lock_| while talking to the GPU.
2434 base::AutoUnlock unlock(lock_);
2435 uploaded_image = MakeTextureImage(context_, std::move(uploaded_image),
2436 color_space, image_needs_mips);
2439 // At-raster may have decoded this while we were unlocked. If so, ignore our
2441 if (image_data->upload.image()) {
2443 DeleteSkImageAndPreventCaching(context_, std::move(uploaded_image));
2447 // Take ownership of any GL texture backing for the SkImage. This allows
2448 // us to use the image with the discardable system.
2449 if (uploaded_image) {
2450 uploaded_image = TakeOwnershipOfSkImageBacking(context_->GrContext(),
2451 std::move(uploaded_image));
2454 // TODO(crbug.com/740737): uploaded_image is sometimes null in certain
2455 // context-lost situations.
2456 if (!uploaded_image) {
2457 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
2461 image_data->upload.SetImage(std::move(uploaded_image));
2463 // If we have a new GPU-backed image, initialize it for use in the GPU
2464 // discardable system.
2465 if (image_data->mode == DecodedDataMode::kGpu) {
2466 // Notify the discardable system of this image so it will count against
2468 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
2469 image_data->upload.gl_id());
// Builds a new ImageData describing how |draw_image| will be decoded and
// uploaded: chooses the decode mode (transfer cache / GPU / CPU fallback for
// oversized images), computes the required byte size, decides on
// bitmap-backing, hardware-accelerated decode eligibility, and YUV decode
// parameters. Caller must hold |lock_|.
2473 scoped_refptr<GpuImageDecodeCache::ImageData>
2474 GpuImageDecodeCache::CreateImageData(const DrawImage& draw_image,
2475 bool allow_hardware_decode) {
2476 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
2477 "GpuImageDecodeCache::CreateImageData");
2478 lock_.AssertAcquired();
2480 int upload_scale_mip_level = CalculateUploadScaleMipLevel(draw_image);
2481 bool needs_mips = ShouldGenerateMips(draw_image, upload_scale_mip_level);
2482 SkImageInfo image_info =
2483 CreateImageInfoForDrawImage(draw_image, upload_scale_mip_level);
2484 const bool image_larger_than_max_texture =
2485 image_info.width() > max_texture_size_ ||
2486 image_info.height() > max_texture_size_;
2487 DecodedDataMode mode;
2488 if (use_transfer_cache_) {
2489 mode = DecodedDataMode::kTransferCache;
2490 } else if (image_larger_than_max_texture) {
2491 // Image too large to upload. Try to use SW fallback.
2492 mode = DecodedDataMode::kCpu;
2494 mode = DecodedDataMode::kGpu;
2497 size_t data_size = image_info.computeMinByteSize();
2498 DCHECK(!SkImageInfo::ByteSizeOverflowed(data_size));
2500 // We need to cache the result of color conversion on the cpu if the image
2501 // will be color converted during the decode.
2502 auto decode_color_space = ColorSpaceForImageDecode(draw_image, mode);
2503 const bool cache_color_conversion_on_cpu =
2504 decode_color_space &&
2505 !SkColorSpace::Equals(decode_color_space.get(),
2506 draw_image.paint_image().color_space());
2508 // |is_bitmap_backed| specifies whether the image has pixel data which can
2509 // directly be used for the upload. This will be the case for non-lazy images
2510 // used at the original scale. In these cases, we don't internally cache any
2511 // cpu component for the image.
2512 // However, if the image will be scaled or color converts on the cpu, we
2513 // consider it a lazy image and cache the scaled result in discardable memory.
2514 const bool is_bitmap_backed = !draw_image.paint_image().IsLazyGenerated() &&
2515 upload_scale_mip_level == 0 &&
2516 !cache_color_conversion_on_cpu;
2518 // Figure out if we will do hardware accelerated decoding. The criteria is as
2521 // - The caller allows hardware decodes.
2522 // - We are using the transfer cache (OOP-R).
2523 // - The image does not require downscaling for uploading (see TODO below).
2524 // - The image is supported according to the profiles advertised by the GPU
2527 // TODO(crbug.com/953367): currently, we don't support scaling with hardware
2528 // decode acceleration. Note that it's still okay for the image to be
2529 // downscaled by Skia using the GPU.
2530 const ImageHeaderMetadata* image_metadata =
2531 draw_image.paint_image().GetImageHeaderMetadata();
2532 bool can_do_hardware_accelerated_decode = false;
2533 bool do_hardware_accelerated_decode = false;
2534 if (allow_hardware_decode && mode == DecodedDataMode::kTransferCache &&
2535 upload_scale_mip_level == 0 &&
2536 context_->ContextSupport()->CanDecodeWithHardwareAcceleration(
2538 DCHECK(image_metadata);
2539 DCHECK_EQ(image_metadata->image_size.width(),
2540 draw_image.paint_image().width());
2541 DCHECK_EQ(image_metadata->image_size.height(),
2542 draw_image.paint_image().height());
2544 can_do_hardware_accelerated_decode = true;
// Hardware decode is additionally gated per-codec by finch/workaround flags.
2545 const bool is_jpeg = (image_metadata->image_type == ImageType::kJPEG);
2546 const bool is_webp = (image_metadata->image_type == ImageType::kWEBP);
2547 if ((is_jpeg && allow_accelerated_jpeg_decodes_) ||
2548 (is_webp && allow_accelerated_webp_decodes_)) {
2549 do_hardware_accelerated_decode = true;
2550 data_size = EstimateHardwareDecodedDataSize(image_metadata);
2551 DCHECK(!is_bitmap_backed);
// YUV decoding applies only to software decodes that will be GPU-uploaded
// (not CPU-fallback) and fit in a texture.
2555 SkYUVAPixmapInfo yuva_pixmap_info;
2556 const bool is_yuv = !do_hardware_accelerated_decode &&
2557 draw_image.paint_image().IsYuv(yuva_supported_data_types_,
2558 &yuva_pixmap_info) &&
2559 mode != DecodedDataMode::kCpu &&
2560 !image_larger_than_max_texture;
2562 absl::optional<SkYUVAPixmapInfo> optional_yuva_pixmap_info;
2564 DCHECK(yuva_pixmap_info.isValid());
2565 if (upload_scale_mip_level > 0) {
2566 // Scaled decode. We always promote to 4:4:4 when scaling YUV to avoid
2567 // blurriness. See comment in DrawAndScaleImage() for details 0
2568 SkYUVAInfo yuva_info = yuva_pixmap_info.yuvaInfo().makeSubsampling(
2569 SkYUVAInfo::Subsampling::k444);
2570 size_t row_bytes[SkYUVAInfo::kMaxPlanes] = {};
2571 for (int i = 0; i < yuva_info.numPlanes(); ++i) {
// With 4:4:4 promotion every plane is full-size, so plane 0's row bytes
// apply to all planes.
2572 row_bytes[i] = yuva_pixmap_info.rowBytes(0);
2574 optional_yuva_pixmap_info =
2575 SkYUVAPixmapInfo(yuva_info, yuva_pixmap_info.dataType(), row_bytes);
2577 // Original size decode.
2578 optional_yuva_pixmap_info = yuva_pixmap_info;
2580 data_size = optional_yuva_pixmap_info->computeTotalBytes();
2581 DCHECK(!SkImageInfo::ByteSizeOverflowed(data_size));
2583 return base::WrapRefCounted(new ImageData(
2584 draw_image.paint_image().stable_id(), mode, data_size,
2585 draw_image.target_color_params(),
2586 CalculateDesiredFilterQuality(draw_image), upload_scale_mip_level,
2587 needs_mips, is_bitmap_backed, can_do_hardware_accelerated_decode,
2588 do_hardware_accelerated_decode, optional_yuva_pixmap_info));
// Bookkeeping before inserting a new persistent cache entry for
// |draw_image|: tracks up to two ContentIds per PaintImage (pending and
// active tree) and evicts persistent-cache entries belonging to the older
// ContentId when a third one arrives.
2591 void GpuImageDecodeCache::WillAddCacheEntry(const DrawImage& draw_image) {
2592 // Remove any old entries for this image. We keep at-most 2 ContentIds for a
2593 // PaintImage (pending and active tree).
2594 auto& cache_entries =
2595 paint_image_entries_[draw_image.paint_image().stable_id()];
2596 cache_entries.count++;
2598 auto& cached_content_ids = cache_entries.content_ids;
2599 const PaintImage::ContentId new_content_id =
2600 draw_image.frame_key().content_id();
// Already tracked: nothing to do.
2602 if (cached_content_ids[0] == new_content_id ||
2603 cached_content_ids[1] == new_content_id) {
// Fill a free tracking slot if one is available.
2607 if (cached_content_ids[0] == PaintImage::kInvalidContentId) {
2608 cached_content_ids[0] = new_content_id;
2612 if (cached_content_ids[1] == PaintImage::kInvalidContentId) {
2613 cached_content_ids[1] = new_content_id;
// Both slots occupied by other ids: evict entries for the older (smaller)
// ContentId and keep the newer one alongside the incoming id.
2617 const PaintImage::ContentId content_id_to_remove =
2618 std::min(cached_content_ids[0], cached_content_ids[1]);
2619 const PaintImage::ContentId content_id_to_keep =
2620 std::max(cached_content_ids[0], cached_content_ids[1]);
2621 DCHECK_NE(content_id_to_remove, content_id_to_keep);
2623 for (auto it = persistent_cache_.begin(); it != persistent_cache_.end();) {
2624 if (it->first.content_id() != content_id_to_remove) {
2627 it = RemoveFromPersistentCache(it);
2631 // Removing entries from the persistent cache should not erase the tracking
2632 // for the current paint_image, since we have 2 different content ids for it
2633 // and only one of them was erased above.
2634 DCHECK_NE(paint_image_entries_.count(draw_image.paint_image().stable_id()),
2637 cached_content_ids[0] = content_id_to_keep;
2638 cached_content_ids[1] = new_content_id;
2641 void GpuImageDecodeCache::DeleteImage(ImageData* image_data) {
2642 if (image_data->HasUploadedData()) {
2643 DCHECK(!image_data->upload.is_locked());
2644 if (image_data->mode == DecodedDataMode::kGpu) {
2645 if (image_data->yuva_pixmap_info.has_value()) {
2646 images_pending_deletion_.push_back(image_data->upload.y_image());
2647 images_pending_deletion_.push_back(image_data->upload.u_image());
2648 images_pending_deletion_.push_back(image_data->upload.v_image());
2649 yuv_images_pending_deletion_.push_back(image_data->upload.image());
2651 images_pending_deletion_.push_back(image_data->upload.image());
2654 if (image_data->mode == DecodedDataMode::kTransferCache)
2655 ids_pending_deletion_.push_back(*image_data->upload.transfer_cache_id());
2657 image_data->upload.Reset();
2660 void GpuImageDecodeCache::UnlockImage(ImageData* image_data) {
2661 DCHECK(image_data->HasUploadedData());
2662 if (image_data->mode == DecodedDataMode::kGpu) {
2663 if (image_data->yuva_pixmap_info.has_value()) {
2664 images_pending_unlock_.push_back(image_data->upload.y_image().get());
2665 images_pending_unlock_.push_back(image_data->upload.u_image().get());
2666 images_pending_unlock_.push_back(image_data->upload.v_image().get());
2667 yuv_images_pending_unlock_.push_back(image_data->upload.image());
2669 images_pending_unlock_.push_back(image_data->upload.image().get());
2672 DCHECK(image_data->mode == DecodedDataMode::kTransferCache);
2673 ids_pending_unlock_.push_back(*image_data->upload.transfer_cache_id());
2675 image_data->upload.OnUnlock();
2677 // If we were holding onto an unmipped image for defering deletion, do it now
2678 // it is guarenteed to have no-refs.
2679 auto unmipped_image = image_data->upload.take_unmipped_image();
2680 if (unmipped_image) {
2681 if (image_data->yuva_pixmap_info.has_value()) {
2682 auto unmipped_y_image = image_data->upload.take_unmipped_y_image();
2683 auto unmipped_u_image = image_data->upload.take_unmipped_u_image();
2684 auto unmipped_v_image = image_data->upload.take_unmipped_v_image();
2685 DCHECK(unmipped_y_image);
2686 DCHECK(unmipped_u_image);
2687 DCHECK(unmipped_v_image);
2688 images_pending_deletion_.push_back(std::move(unmipped_y_image));
2689 images_pending_deletion_.push_back(std::move(unmipped_u_image));
2690 images_pending_deletion_.push_back(std::move(unmipped_v_image));
2691 yuv_images_pending_deletion_.push_back(std::move(unmipped_image));
2693 images_pending_deletion_.push_back(std::move(unmipped_image));
2698 // YUV images are handled slightly differently because they are not themselves
2699 // registered with the discardable memory system. We cannot use
2700 // GlIdFromSkImage on these YUV SkImages to flush pending operations because
2701 // doing so will flatten it to RGB.
2702 void GpuImageDecodeCache::FlushYUVImages(
2703 std::vector<sk_sp<SkImage>>* yuv_images) {
2704 CheckContextLockAcquiredIfNecessary();
2705 lock_.AssertAcquired();
2706 for (auto& image : *yuv_images) {
2707 image->flushAndSubmit(context_->GrContext());
2709 yuv_images->clear();
2712 // We always run pending operations in the following order:
2714 // > Flush YUV images that will be unlocked
2716 // > Flush YUV images that will be deleted
2718 // This ensures that:
2719 // a) We never fully unlock an image that's pending lock (lock before unlock)
2720 // b) We never delete an image that has pending locks/unlocks.
2721 // c) We never unlock or delete the underlying texture planes for a YUV
2722 // image before all operations referencing it have completed.
2724 // As this can be run at-raster, to unlock/delete an image that was just used,
2725 // we need to call GlIdFromSkImage, which flushes pending IO on the image,
2726 // rather than just using a cached GL ID.
2727 // YUV images are handled slightly differently because they are backed by
2728 // texture images but are not themselves registered with the discardable memory
2729 // system. We wait to delete the pointer to a YUV image until we have a context
2730 // lock and its textures have been deleted.
2731 void GpuImageDecodeCache::RunPendingContextThreadOperations() {
2732 CheckContextLockAcquiredIfNecessary();
2733 lock_.AssertAcquired();
2735 for (auto* image : images_pending_complete_lock_) {
2736 context_->ContextSupport()->CompleteLockDiscardableTexureOnContextThread(
2737 GlIdFromSkImage(image));
2739 images_pending_complete_lock_.clear();
2741 FlushYUVImages(&yuv_images_pending_unlock_);
2742 for (auto* image : images_pending_unlock_) {
2743 context_->RasterInterface()->UnlockDiscardableTextureCHROMIUM(
2744 GlIdFromSkImage(image));
2746 images_pending_unlock_.clear();
2748 for (auto id : ids_pending_unlock_) {
2749 context_->ContextSupport()->UnlockTransferCacheEntries({std::make_pair(
2750 static_cast<uint32_t>(TransferCacheEntryType::kImage), id)});
2752 ids_pending_unlock_.clear();
2754 FlushYUVImages(&yuv_images_pending_deletion_);
2755 for (auto& image : images_pending_deletion_) {
2756 uint32_t texture_id = GlIdFromSkImage(image.get());
2757 if (context_->RasterInterface()->LockDiscardableTextureCHROMIUM(
2759 context_->RasterInterface()->DeleteGpuRasterTexture(texture_id);
2762 images_pending_deletion_.clear();
2764 for (auto id : ids_pending_deletion_) {
2765 if (context_->ContextSupport()->ThreadsafeLockTransferCacheEntry(
2766 static_cast<uint32_t>(TransferCacheEntryType::kImage), id)) {
2767 context_->ContextSupport()->DeleteTransferCacheEntry(
2768 static_cast<uint32_t>(TransferCacheEntryType::kImage), id);
2771 ids_pending_deletion_.clear();
2774 SkImageInfo GpuImageDecodeCache::CreateImageInfoForDrawImage(
2775 const DrawImage& draw_image,
2776 int upload_scale_mip_level) const {
2777 gfx::Size mip_size =
2778 CalculateSizeForMipLevel(draw_image, upload_scale_mip_level);
2780 // Decide the SkColorType for the buffer for the PaintImage to draw or
2781 // decode into. Default to using the cache's color type.
2782 SkColorType color_type = color_type_;
2784 // The PaintImage will identify that its content is high bit depth by setting
2785 // its SkColorType to kRGBA_F16_SkColorType. Only set the target SkColorType
2786 // to this value if the PaintImage itself reports it. Otherwise, the content
2787 // may not appear, see https://crbug.com/1266456.
2788 const auto image_color_type = draw_image.paint_image().GetColorType();
2789 if (image_color_type == kRGBA_F16_SkColorType) {
2790 // Only set the target SkColorType to kRGBA_F16_SkColorType if the content
2791 // is HDR and the target display is HDR capable. This is done to preserve
2792 // existing behavior while fixing the above mentioned bug. See related
2793 // discussions in https://crbug.com/1076568.
2794 if (draw_image.paint_image().GetContentColorUsage() ==
2795 gfx::ContentColorUsage::kHDR &&
2796 draw_image.target_color_space().IsHDR()) {
2797 color_type = kRGBA_F16_SkColorType;
2801 return SkImageInfo::Make(mip_size.width(), mip_size.height(), color_type,
2802 kPremul_SkAlphaType);
2805 bool GpuImageDecodeCache::TryLockImage(HaveContextLock have_context_lock,
2806 const DrawImage& draw_image,
2808 DCHECK(data->HasUploadedData());
2810 if (data->upload.is_locked())
2813 if (data->mode == DecodedDataMode::kTransferCache) {
2814 DCHECK(use_transfer_cache_);
2815 DCHECK(data->upload.transfer_cache_id());
2816 if (context_->ContextSupport()->ThreadsafeLockTransferCacheEntry(
2817 static_cast<uint32_t>(TransferCacheEntryType::kImage),
2818 *data->upload.transfer_cache_id())) {
2819 data->upload.OnLock();
2822 } else if (have_context_lock == HaveContextLock::kYes) {
2823 auto* ri = context_->RasterInterface();
2824 // If |have_context_lock|, we can immediately lock the image and send
2825 // the lock command to the GPU process.
2826 // TODO(crbug.com/914622): Add Chrome GL extension to upload texture array.
2827 if (data->yuva_pixmap_info.has_value() &&
2828 ri->LockDiscardableTextureCHROMIUM(data->upload.gl_y_id()) &&
2829 ri->LockDiscardableTextureCHROMIUM(data->upload.gl_u_id()) &&
2830 ri->LockDiscardableTextureCHROMIUM(data->upload.gl_v_id())) {
2831 DCHECK(!use_transfer_cache_);
2832 DCHECK(data->mode == DecodedDataMode::kGpu);
2833 data->upload.OnLock();
2835 } else if (!data->yuva_pixmap_info.has_value() &&
2836 ri->LockDiscardableTextureCHROMIUM(data->upload.gl_id())) {
2837 DCHECK(!use_transfer_cache_);
2838 DCHECK(data->mode == DecodedDataMode::kGpu);
2839 data->upload.OnLock();
2843 // If !|have_context_lock|, we use
2844 // ThreadsafeShallowLockDiscardableTexture. This takes a reference to the
2845 // image, ensuring that it can't be deleted by the service, but delays
2846 // sending a lock command over the command buffer. This command must be
2847 // sent before the image is used, but is now guaranteed to succeed. We
2848 // will send this command via
2849 // CompleteLockDiscardableTextureOnContextThread in
2850 // UploadImageIfNecessary, which is guaranteed to run before the texture
2852 auto* context_support = context_->ContextSupport();
2853 if (data->yuva_pixmap_info.has_value() &&
2854 context_support->ThreadSafeShallowLockDiscardableTexture(
2855 data->upload.gl_y_id()) &&
2856 context_support->ThreadSafeShallowLockDiscardableTexture(
2857 data->upload.gl_u_id()) &&
2858 context_support->ThreadSafeShallowLockDiscardableTexture(
2859 data->upload.gl_v_id())) {
2860 DCHECK(!use_transfer_cache_);
2861 DCHECK(data->mode == DecodedDataMode::kGpu);
2862 data->upload.OnLock();
2863 images_pending_complete_lock_.push_back(data->upload.y_image().get());
2864 images_pending_complete_lock_.push_back(data->upload.u_image().get());
2865 images_pending_complete_lock_.push_back(data->upload.v_image().get());
2867 } else if (!data->yuva_pixmap_info.has_value() &&
2868 context_support->ThreadSafeShallowLockDiscardableTexture(
2869 data->upload.gl_id())) {
2870 DCHECK(!use_transfer_cache_);
2871 DCHECK(data->mode == DecodedDataMode::kGpu);
2872 data->upload.OnLock();
2873 images_pending_complete_lock_.push_back(data->upload.image().get());
2878 // Couldn't lock, abandon the image.
2883 // Tries to find an ImageData that can be used to draw the provided
2884 // |draw_image|. First looks for an exact entry in our |in_use_cache_|. If one
2885 // cannot be found, it looks for a compatible entry in our |persistent_cache_|.
2886 GpuImageDecodeCache::ImageData* GpuImageDecodeCache::GetImageDataForDrawImage(
2887 const DrawImage& draw_image,
2888 const InUseCacheKey& key) {
2889 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
2890 "GpuImageDecodeCache::GetImageDataForDrawImage");
2891 lock_.AssertAcquired();
2892 DCHECK(UseCacheForDrawImage(draw_image));
2894 auto found_in_use = in_use_cache_.find(key);
2895 if (found_in_use != in_use_cache_.end())
2896 return found_in_use->second.image_data.get();
2898 auto found_persistent = persistent_cache_.Get(draw_image.frame_key());
2899 if (found_persistent != persistent_cache_.end()) {
2900 ImageData* image_data = found_persistent->second.get();
2901 if (IsCompatible(image_data, draw_image)) {
2904 RemoveFromPersistentCache(found_persistent);
2911 // Determines if we can draw the provided |draw_image| using the provided
2912 // |image_data|. This is true if the |image_data| is not scaled, or if it
2913 // is scaled at an equal or larger scale and equal or larger quality to
2914 // the provided |draw_image|.
2915 bool GpuImageDecodeCache::IsCompatible(const ImageData* image_data,
2916 const DrawImage& draw_image) const {
2917 bool is_scaled = image_data->upload_scale_mip_level != 0;
2918 bool scale_is_compatible = CalculateUploadScaleMipLevel(draw_image) >=
2919 image_data->upload_scale_mip_level;
2920 bool quality_is_compatible =
2921 CalculateDesiredFilterQuality(draw_image) <= image_data->quality;
2922 sk_sp<SkColorSpace> decoded_target_colorspace =
2923 ColorSpaceForImageDecode(draw_image, image_data->mode);
2924 bool color_is_compatible = false;
2925 if (!decoded_target_colorspace ||
2926 !gfx::ColorSpace(*decoded_target_colorspace).IsToneMappedByDefault()) {
2927 color_is_compatible = image_data->target_color_params.color_space ==
2928 draw_image.target_color_space();
2930 color_is_compatible =
2931 image_data->target_color_params == draw_image.target_color_params();
2933 if (!color_is_compatible)
2935 if (is_scaled && (!scale_is_compatible || !quality_is_compatible))
2940 size_t GpuImageDecodeCache::GetDrawImageSizeForTesting(const DrawImage& image) {
2941 base::AutoLock lock(lock_);
2942 scoped_refptr<ImageData> data =
2943 CreateImageData(image, false /* allow_hardware_decode */);
2947 void GpuImageDecodeCache::SetImageDecodingFailedForTesting(
2948 const DrawImage& image) {
2949 base::AutoLock lock(lock_);
2950 auto found = persistent_cache_.Peek(image.frame_key());
2951 DCHECK(found != persistent_cache_.end());
2952 ImageData* image_data = found->second.get();
2953 image_data->decode.decode_failure = true;
2956 bool GpuImageDecodeCache::DiscardableIsLockedForTesting(
2957 const DrawImage& image) {
2958 base::AutoLock lock(lock_);
2959 auto found = persistent_cache_.Peek(image.frame_key());
2960 DCHECK(found != persistent_cache_.end());
2961 ImageData* image_data = found->second.get();
2962 return image_data->decode.is_locked();
2965 bool GpuImageDecodeCache::IsInInUseCacheForTesting(
2966 const DrawImage& image) const {
2967 auto found = in_use_cache_.find(InUseCacheKeyFromDrawImage(image));
2968 return found != in_use_cache_.end();
2971 bool GpuImageDecodeCache::IsInPersistentCacheForTesting(
2972 const DrawImage& image) const {
2973 auto found = persistent_cache_.Peek(image.frame_key());
2974 return found != persistent_cache_.end();
2977 sk_sp<SkImage> GpuImageDecodeCache::GetSWImageDecodeForTesting(
2978 const DrawImage& image) {
2979 base::AutoLock lock(lock_);
2980 auto found = persistent_cache_.Peek(image.frame_key());
2981 DCHECK(found != persistent_cache_.end());
2982 ImageData* image_data = found->second.get();
2983 DCHECK(!image_data->yuva_pixmap_info.has_value());
2984 return image_data->decode.ImageForTesting();
2987 // Used for in-process-raster YUV decoding tests, where we often need the
2988 // SkImages for each underlying plane because asserting or requesting fields for
2989 // the YUV SkImage may flatten it to RGB or not be possible to request.
2990 sk_sp<SkImage> GpuImageDecodeCache::GetUploadedPlaneForTesting(
2991 const DrawImage& draw_image,
2993 base::AutoLock lock(lock_);
2994 ImageData* image_data = GetImageDataForDrawImage(
2995 draw_image, InUseCacheKeyFromDrawImage(draw_image));
2996 if (!image_data->yuva_pixmap_info.has_value())
3000 return image_data->upload.y_image();
3002 return image_data->upload.u_image();
3004 return image_data->upload.v_image();
3010 size_t GpuImageDecodeCache::GetDarkModeImageCacheSizeForTesting(
3011 const DrawImage& draw_image) {
3012 base::AutoLock lock(lock_);
3013 ImageData* image_data = GetImageDataForDrawImage(
3014 draw_image, InUseCacheKeyFromDrawImage(draw_image));
3015 return image_data ? image_data->decode.dark_mode_color_filter_cache.size()
3019 bool GpuImageDecodeCache::NeedsDarkModeFilterForTesting(
3020 const DrawImage& draw_image) {
3021 base::AutoLock lock(lock_);
3022 ImageData* image_data = GetImageDataForDrawImage(
3023 draw_image, InUseCacheKeyFromDrawImage(draw_image));
3025 return NeedsDarkModeFilter(draw_image, image_data);
3028 void GpuImageDecodeCache::OnMemoryPressure(
3029 base::MemoryPressureListener::MemoryPressureLevel level) {
3030 if (!ImageDecodeCacheUtils::ShouldEvictCaches(level))
3033 base::AutoLock lock(lock_);
3034 base::AutoReset<bool> reset(&aggressively_freeing_resources_, true);
3038 bool GpuImageDecodeCache::SupportsColorSpaceConversion() const {
3039 switch (color_type_) {
3040 case kRGBA_8888_SkColorType:
3041 case kBGRA_8888_SkColorType:
3042 case kRGBA_F16_SkColorType:
3049 sk_sp<SkColorSpace> GpuImageDecodeCache::ColorSpaceForImageDecode(
3050 const DrawImage& image,
3051 DecodedDataMode mode) const {
3052 if (!SupportsColorSpaceConversion())
3055 // For kGpu or kTransferCache images color conversion is handled during
3056 // upload, so keep the original colorspace here.
3057 return sk_ref_sp(image.paint_image().color_space());
3060 void GpuImageDecodeCache::CheckContextLockAcquiredIfNecessary() {
3061 if (!context_->GetLock())
3063 context_->GetLock()->AssertAcquired();
3066 sk_sp<SkImage> GpuImageDecodeCache::CreateImageFromYUVATexturesInternal(
3067 const SkImage* uploaded_y_image,
3068 const SkImage* uploaded_u_image,
3069 const SkImage* uploaded_v_image,
3070 const int image_width,
3071 const int image_height,
3072 const SkYUVAInfo::PlaneConfig yuva_plane_config,
3073 const SkYUVAInfo::Subsampling yuva_subsampling,
3074 const SkYUVColorSpace yuv_color_space,
3075 sk_sp<SkColorSpace> target_color_space,
3076 sk_sp<SkColorSpace> decoded_color_space) const {
3077 DCHECK(uploaded_y_image);
3078 DCHECK(uploaded_u_image);
3079 DCHECK(uploaded_v_image);
3080 SkYUVAInfo yuva_info({image_width, image_height}, yuva_plane_config,
3081 yuva_subsampling, yuv_color_space);
3082 GrBackendTexture yuv_textures[3]{};
3083 yuv_textures[0] = uploaded_y_image->getBackendTexture(false);
3084 yuv_textures[1] = uploaded_u_image->getBackendTexture(false);
3085 yuv_textures[2] = uploaded_v_image->getBackendTexture(false);
3086 GrYUVABackendTextures yuva_backend_textures(yuva_info, yuv_textures,
3087 kTopLeft_GrSurfaceOrigin);
3088 DCHECK(yuva_backend_textures.isValid());
3090 if (target_color_space && SkColorSpace::Equals(target_color_space.get(),
3091 decoded_color_space.get())) {
3092 target_color_space = nullptr;
3095 sk_sp<SkImage> yuva_image = SkImage::MakeFromYUVATextures(
3096 context_->GrContext(), yuva_backend_textures,
3097 std::move(decoded_color_space));
3098 if (target_color_space)
3099 return yuva_image->makeColorSpace(target_color_space,
3100 context_->GrContext());
3105 void GpuImageDecodeCache::UpdateMipsIfNeeded(const DrawImage& draw_image,
3106 ImageData* image_data) {
3107 CheckContextLockAcquiredIfNecessary();
3108 // If we already have mips, nothing to do.
3109 if (image_data->needs_mips)
3113 ShouldGenerateMips(draw_image, image_data->upload_scale_mip_level);
3117 image_data->needs_mips = true;
3119 // If we have no uploaded image, nothing to do other than update needs_mips.
3120 // Mips will be generated during later upload.
3121 if (!image_data->HasUploadedData() ||
3122 image_data->mode != DecodedDataMode::kGpu)
3125 if (image_data->yuva_pixmap_info.has_value()) {
3126 // Need to generate mips. Take a reference on the planes we're about to
3127 // delete, delaying deletion.
3128 // TODO(crbug.com/910276): Change after alpha support.
3129 sk_sp<SkImage> previous_y_image = image_data->upload.y_image();
3130 sk_sp<SkImage> previous_u_image = image_data->upload.u_image();
3131 sk_sp<SkImage> previous_v_image = image_data->upload.v_image();
3133 // Generate a new image from the previous, adding mips.
3134 sk_sp<SkImage> image_y_with_mips = previous_y_image->makeTextureImage(
3135 context_->GrContext(), GrMipMapped::kYes);
3136 sk_sp<SkImage> image_u_with_mips = previous_u_image->makeTextureImage(
3137 context_->GrContext(), GrMipMapped::kYes);
3138 sk_sp<SkImage> image_v_with_mips = previous_v_image->makeTextureImage(
3139 context_->GrContext(), GrMipMapped::kYes);
3141 // Handle lost context.
3142 if (!image_y_with_mips || !image_u_with_mips || !image_v_with_mips) {
3143 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
3147 // No need to do anything if mipping this image results in the same
3148 // textures. Deleting it below will result in lifetime issues.
3149 // We expect that if one plane mips the same, the others should as well.
3150 if (GlIdFromSkImage(image_y_with_mips.get()) ==
3151 image_data->upload.gl_y_id() &&
3152 GlIdFromSkImage(image_u_with_mips.get()) ==
3153 image_data->upload.gl_u_id() &&
3154 GlIdFromSkImage(image_v_with_mips.get()) ==
3155 image_data->upload.gl_v_id())
3158 // Skia owns our new image planes, take ownership.
3159 sk_sp<SkImage> image_y_with_mips_owned = TakeOwnershipOfSkImageBacking(
3160 context_->GrContext(), std::move(image_y_with_mips));
3161 sk_sp<SkImage> image_u_with_mips_owned = TakeOwnershipOfSkImageBacking(
3162 context_->GrContext(), std::move(image_u_with_mips));
3163 sk_sp<SkImage> image_v_with_mips_owned = TakeOwnershipOfSkImageBacking(
3164 context_->GrContext(), std::move(image_v_with_mips));
3166 // Handle lost context
3167 if (!image_y_with_mips_owned || !image_u_with_mips_owned ||
3168 !image_v_with_mips_owned) {
3169 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
3173 int width = image_y_with_mips_owned->width();
3174 int height = image_y_with_mips_owned->height();
3175 sk_sp<SkColorSpace> color_space =
3176 SupportsColorSpaceConversion() &&
3177 draw_image.target_color_space().IsValid()
3178 ? draw_image.target_color_space().ToSkColorSpace()
3180 sk_sp<SkColorSpace> upload_color_space =
3181 ColorSpaceForImageDecode(draw_image, image_data->mode);
3182 sk_sp<SkImage> yuv_image_with_mips_owned =
3183 CreateImageFromYUVATexturesInternal(
3184 image_y_with_mips_owned.get(), image_u_with_mips_owned.get(),
3185 image_v_with_mips_owned.get(), width, height,
3186 image_data->yuva_pixmap_info->yuvaInfo().planeConfig(),
3187 image_data->yuva_pixmap_info->yuvaInfo().subsampling(),
3188 image_data->yuva_pixmap_info->yuvaInfo().yuvColorSpace(),
3189 color_space, upload_color_space);
3190 // In case of lost context
3191 if (!yuv_image_with_mips_owned) {
3192 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
3196 // The previous images might be in the in-use cache, potentially held
3197 // externally. We must defer deleting them until the entry is unlocked.
3198 image_data->upload.set_unmipped_image(image_data->upload.image());
3199 image_data->upload.set_unmipped_yuv_images(image_data->upload.y_image(),
3200 image_data->upload.u_image(),
3201 image_data->upload.v_image());
3203 // Set the new image on the cache.
3204 image_data->upload.Reset();
3205 image_data->upload.SetImage(std::move(yuv_image_with_mips_owned));
3206 image_data->upload.SetYuvImage(std::move(image_y_with_mips_owned),
3207 std::move(image_u_with_mips_owned),
3208 std::move(image_v_with_mips_owned));
3209 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
3210 image_data->upload.gl_y_id());
3211 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
3212 image_data->upload.gl_u_id());
3213 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
3214 image_data->upload.gl_v_id());
3215 return; // End YUV mip mapping.
3217 // Begin RGBX mip mapping.
3218 // Need to generate mips. Take a reference on the image we're about to
3219 // delete, delaying deletion.
3220 sk_sp<SkImage> previous_image = image_data->upload.image();
3222 // Generate a new image from the previous, adding mips.
3223 sk_sp<SkImage> image_with_mips = previous_image->makeTextureImage(
3224 context_->GrContext(), GrMipMapped::kYes);
3226 // Handle lost context.
3227 if (!image_with_mips) {
3228 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
3232 // No need to do anything if mipping this image results in the same texture.
3233 // Deleting it below will result in lifetime issues.
3234 if (GlIdFromSkImage(image_with_mips.get()) == image_data->upload.gl_id())
3237 // Skia owns our new image, take ownership.
3238 sk_sp<SkImage> image_with_mips_owned = TakeOwnershipOfSkImageBacking(
3239 context_->GrContext(), std::move(image_with_mips));
3241 // Handle lost context
3242 if (!image_with_mips_owned) {
3243 DLOG(WARNING) << "TODO(crbug.com/740737): Context was lost. Early out.";
3247 // The previous image might be in the in-use cache, potentially held
3248 // externally. We must defer deleting it until the entry is unlocked.
3249 image_data->upload.set_unmipped_image(image_data->upload.image());
3251 // Set the new image on the cache.
3252 image_data->upload.Reset();
3253 image_data->upload.SetImage(std::move(image_with_mips_owned));
3254 context_->RasterInterface()->InitializeDiscardableTextureCHROMIUM(
3255 image_data->upload.gl_id());