1 // Copyright 2014 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "components/viz/service/display/surface_aggregator.h"
13 #include "base/auto_reset.h"
14 #include "base/check_op.h"
15 #include "base/containers/adapters.h"
16 #include "base/containers/cxx20_erase.h"
17 #include "base/functional/bind.h"
18 #include "base/logging.h"
19 #include "base/metrics/histogram_macros.h"
20 #include "base/numerics/ranges.h"
21 #include "base/timer/elapsed_timer.h"
22 #include "base/trace_event/trace_event.h"
23 #include "base/trace_event/typed_macros.h"
24 #include "cc/base/math_util.h"
25 #include "components/viz/common/features.h"
26 #include "components/viz/common/quads/aggregated_render_pass.h"
27 #include "components/viz/common/quads/aggregated_render_pass_draw_quad.h"
28 #include "components/viz/common/quads/compositor_frame.h"
29 #include "components/viz/common/quads/compositor_render_pass_draw_quad.h"
30 #include "components/viz/common/quads/debug_border_draw_quad.h"
31 #include "components/viz/common/quads/draw_quad.h"
32 #include "components/viz/common/quads/shared_quad_state.h"
33 #include "components/viz/common/quads/solid_color_draw_quad.h"
34 #include "components/viz/common/quads/surface_draw_quad.h"
35 #include "components/viz/common/quads/texture_draw_quad.h"
36 #include "components/viz/common/quads/yuv_video_draw_quad.h"
37 #include "components/viz/common/surfaces/surface_range.h"
38 #include "components/viz/common/viz_utils.h"
39 #include "components/viz/service/debugger/viz_debugger.h"
40 #include "components/viz/service/display/aggregated_frame.h"
41 #include "components/viz/service/display/display_resource_provider.h"
42 #include "components/viz/service/display/overlay_candidate.h"
43 #include "components/viz/service/display/renderer_utils.h"
44 #include "components/viz/service/display/resolved_frame_data.h"
45 #include "components/viz/service/surfaces/surface.h"
46 #include "components/viz/service/surfaces/surface_allocation_group.h"
47 #include "components/viz/service/surfaces/surface_client.h"
48 #include "components/viz/service/surfaces/surface_manager.h"
49 #include "ui/gfx/geometry/angle_conversions.h"
50 #include "ui/gfx/geometry/rect.h"
51 #include "ui/gfx/geometry/rect_conversions.h"
52 #include "ui/gfx/geometry/rect_f.h"
53 #include "ui/gfx/overlay_transform_utils.h"
// Bundles a gfx::MaskFilterInfo with the |is_fast_rounded_corner| flag from
// the originating SharedQuadState. The constructor maps the mask into the
// embedding target's coordinate space via |target_transform|.
struct MaskFilterInfoExt {
  MaskFilterInfoExt() = default;
  MaskFilterInfoExt(const gfx::MaskFilterInfo& mask_filter_info_arg,
                    bool is_fast_rounded_corner_arg,
                    const gfx::Transform target_transform)
      : mask_filter_info(mask_filter_info_arg),
        is_fast_rounded_corner(is_fast_rounded_corner_arg) {
    // Move the mask into the embedding target's coordinate space so later
    // comparisons happen in a single space.
    mask_filter_info.ApplyTransform(target_transform);

  // Returns true if the quads from |merge_render_pass| can be merged into
  // the embedding render pass based on mask filter info.
  // |parent_target_transform| shall be used to translate mask filter infos of
  // |merge_render_pass.shared_quad_state_list| in the same coordinate space
  // as the |mask_filter_info| is.
  bool CanMergeMaskFilterInfo(
      const CompositorRenderPass& merge_render_pass,
      const gfx::Transform& parent_target_transform) const {
    // Only axis-aligned transforms are supported when comparing mask bounds.
    DCHECK(parent_target_transform.Preserves2dAxisAlignment());

    // If the embedding quad has no mask filter, then we do not have to block
    if (mask_filter_info.IsEmpty()) {

    // If the embedding quad has rounded corner and it is not a fast rounded
    // corner, we cannot merge.
    if (mask_filter_info.HasRoundedCorners() && !is_fast_rounded_corner) {

    // If any of the quads in the render pass to merged has a mask filter of its
    // own, then we have to check if that has fast rounded corners and they fit
    // |mask_filter_info|'s ones. In that case, we can merge this render pass.
    // Merge is impossible in all the other cases.
    for (const auto* sqs : merge_render_pass.shared_quad_state_list) {
      if (sqs->mask_filter_info.IsEmpty()) {

      // We cannot handle rotation with mask filter as rotated content is unable
      // to apply correct clip.
      if (!sqs->quad_to_target_transform.Preserves2dAxisAlignment()) {

      // Those must be fast rounded corners that enables us to squash mask
      if (sqs->mask_filter_info.HasRoundedCorners() &&
          !sqs->is_fast_rounded_corner) {

      // Gradient masks cannot be combined with the embedding mask.
      if (sqs->mask_filter_info.HasGradientMask()) {

      // Take the bounds of the sqs filter and apply clipping rect as it may
      // make current mask fit the |mask_filter_info|'s bounds. Not doing so may
      // result in marking this mask not suitable for merging while it never
      // spans outside another mask.
      auto rounded_corner_bounds = sqs->mask_filter_info.bounds();
      if (sqs->clip_rect.has_value()) {
        rounded_corner_bounds.Intersect(gfx::RectF(*sqs->clip_rect));

      // Before checking if current mask's rounded corners do not intersect with
      // the upper level rounded corner mask, its system coordinate must be
      // transformed to that target's system coordinate.
      rounded_corner_bounds =
          parent_target_transform.MapRect(rounded_corner_bounds);

      // This is the only case when quads of this render pass with the mask
      // filter info that has fast rounded corners set can be merged into the
      // embedding render pass. So, if they don't intersect with the "toplevel"
      // rounded corners, we can merge.
      if (!mask_filter_info.rounded_corner_bounds().Contains(
              rounded_corner_bounds)) {

  // The mask, kept in the embedding target's coordinate space.
  gfx::MaskFilterInfo mask_filter_info;
  // True when the rounded corners come from a "fast" rounded corner, which
  // allows merging (see CanMergeMaskFilterInfo()).
  bool is_fast_rounded_corner;
// Used to determine when to treat opacity close to 1.f as opaque. The value is
// chosen to be smaller than 1/255.
constexpr float kOpacityEpsilon = 0.001f;
// Moves all CopyOutputRequests registered under |render_pass_id| out of the
// |copy_requests| multimap and appends them to |output_requests|, then erases
// them from the multimap.
void MoveMatchingRequests(
    CompositorRenderPassId render_pass_id,
    std::multimap<CompositorRenderPassId, std::unique_ptr<CopyOutputRequest>>*
    std::vector<std::unique_ptr<CopyOutputRequest>>* output_requests) {
  // equal_range() yields every request filed for this render pass id.
  auto request_range = copy_requests->equal_range(render_pass_id);
  for (auto it = request_range.first; it != request_range.second; ++it) {
    output_requests->push_back(std::move(it->second));
  copy_requests->erase(request_range.first, request_range.second);
// Maps |root_damage_rect| from root space back into the quad's local space by
// inverting the combined quad-to-root transform.
// Returns true if the damage rect is valid.
bool CalculateQuadSpaceDamageRect(
    const gfx::Transform& quad_to_target_transform,
    const gfx::Transform& target_to_root_transform,
    const gfx::Rect& root_damage_rect,
    gfx::Rect* quad_space_damage_rect) {
  gfx::Transform quad_to_root_transform =
      target_to_root_transform * quad_to_target_transform;
  gfx::Transform inverse_transform;
  // The inverse may not exist for degenerate transforms.
  bool inverse_valid = quad_to_root_transform.GetInverse(&inverse_transform);
  *quad_space_damage_rect = cc::MathUtil::ProjectEnclosingClippedRect(
      inverse_transform, root_damage_rect);
// Create a clip rect for an aggregated quad from the original clip rect and
// the clip rect from the surface it's on. |quad_clip| is mapped through
// |target_transform| before being intersected with |surface_clip|. Returns
// absl::nullopt when neither input clip is set.
absl::optional<gfx::Rect> CalculateClipRect(
    const absl::optional<gfx::Rect> surface_clip,
    const absl::optional<gfx::Rect> quad_clip,
    const gfx::Transform& target_transform) {
  absl::optional<gfx::Rect> out_clip;
    out_clip = surface_clip;

    // TODO(jamesr): This only works if target_transform maps integer
    // rects to integer rects.
    gfx::Rect final_clip =
        cc::MathUtil::MapEnclosingClippedRect(target_transform, *quad_clip);
      // Both clips are present: combine them by intersection.
      out_clip->Intersect(final_clip);
      out_clip = final_clip;
// Creates a new SharedQuadState in |dest_render_pass| based on |source_sqs|
// plus additional modified values.
// - |source_sqs| is the SharedQuadState to copy from.
// - |quad_to_target_transform| replaces the equivalent |source_sqs| value.
// - |target_transform| is an additional transform to add. Used when merging the
//   root render pass of a surface into the embedding render pass.
// - |quad_layer_rect| replaces the equivalent |source_sqs| value.
// - |visible_quad_layer_rect| replaces the equivalent |source_sqs| value.
// - |mask_filter_info_ext| replaces the equivalent |source_sqs| values.
// - |added_clip_rect| is an additional clip rect added to the quad clip rect.
// - |dest_render_pass| is where the new SharedQuadState will be created.
// Returns the newly created SharedQuadState (owned by |dest_render_pass|).
SharedQuadState* CopyAndScaleSharedQuadState(
    const SharedQuadState* source_sqs,
    uint32_t client_namespace_id,
    const gfx::Transform& quad_to_target_transform,
    const gfx::Transform& target_transform,
    const gfx::Rect& quad_layer_rect,
    const gfx::Rect& visible_quad_layer_rect,
    const absl::optional<gfx::Rect> added_clip_rect,
    const MaskFilterInfoExt& mask_filter_info_ext,
    AggregatedRenderPass* dest_render_pass) {
  auto* shared_quad_state = dest_render_pass->CreateAndAppendSharedQuadState();
  // Combine the embedder-supplied clip with the source quad's own clip.
  auto new_clip_rect = CalculateClipRect(added_clip_rect, source_sqs->clip_rect,

  // target_transform contains any transformation that may exist
  // between the context that these quads are being copied from (i.e. the
  // surface's draw transform when aggregated from within a surface) to the
  // target space of the pass. This will be identity except when copying the
  // root draw pass from a surface into a pass when the surface draw quad's
  // transform is not identity.
  gfx::Transform new_transform = quad_to_target_transform;
  new_transform.PostConcat(target_transform);

  shared_quad_state->SetAll(
      new_transform, quad_layer_rect, visible_quad_layer_rect,
      mask_filter_info_ext.mask_filter_info, new_clip_rect,
      source_sqs->are_contents_opaque, source_sqs->opacity,
      source_sqs->blend_mode, source_sqs->sorting_context_id,
      source_sqs->layer_id, mask_filter_info_ext.is_fast_rounded_corner);
  // Tag the copied state with the embedding client's namespace.
  shared_quad_state->layer_namespace_id = client_namespace_id;
  return shared_quad_state;
// Creates a new SharedQuadState in |dest_render_pass| and copies |source_sqs|
// into it. See CopyAndScaleSharedQuadState() for full documentation.
// This is the no-scaling variant: the source's own transform and layer rects
// are reused unchanged.
SharedQuadState* CopySharedQuadState(
    const SharedQuadState* source_sqs,
    uint32_t client_namespace_id,
    const gfx::Transform& target_transform,
    const absl::optional<gfx::Rect> added_clip_rect,
    const MaskFilterInfoExt& mask_filter_info,
    AggregatedRenderPass* dest_render_pass) {
  return CopyAndScaleSharedQuadState(
      source_sqs, client_namespace_id, source_sqs->quad_to_target_transform,
      target_transform, source_sqs->quad_layer_rect,
      source_sqs->visible_quad_layer_rect, added_clip_rect, mask_filter_info,
// Records whether |resolved_pass| was merged into its parent this aggregation.
// Across multiple embeddings in one frame the state converges to
// kAlwaysMerged / kNotMerged, or kSomeTimesMerged when the embeddings
// disagree.
void UpdatePersistentPassDataMergeState(ResolvedPassData& resolved_pass,
                                        AggregatedRenderPass* dest_pass,
                                        bool is_merged_pass) {
  auto& persistent_data = resolved_pass.current_persistent_data();

  PersistentPassData::MergeState merge_state =
      is_merged_pass ? PersistentPassData::kAlwaysMerged
                     : PersistentPassData::kNotMerged;

  if (persistent_data.merge_state == PersistentPassData::kInitState) {
    // This is the first time it's embedded.
    persistent_data.merge_state = merge_state;
  } else if (persistent_data.merge_state != merge_state) {
    // Embedded more than once with differing merge outcomes.
    persistent_data.merge_state = PersistentPassData::kSomeTimesMerged;
// Returns true when the pass's merge state changed in a way that requires a
// redraw: it stopped being merged into its parent, or its merge behavior is
// inconsistent across embeddings in either frame.
bool ChangeInMergeState(ResolvedPassData& resolved_pass) {
  DCHECK(resolved_pass.current_persistent_data().merge_state !=
         PersistentPassData::kInitState);
  // If this is the first frame and previous_merge_state is empty,
  // this function will return false.
  auto current_merge_state =
      resolved_pass.current_persistent_data().merge_state;
  auto previous_merge_state =
      resolved_pass.previous_persistent_data().merge_state;

  // Check if this render pass is merged to its parent render pass in the
  // previous frame but is not in the current frame.
  bool change_in_merged_pass =
      previous_merge_state == PersistentPassData::kAlwaysMerged &&
      current_merge_state == PersistentPassData::kNotMerged;

  // If it's embedded multiple times and some are merged while some are not,
  // just redraw the render pass. It's complicated to track individual change.
  change_in_merged_pass |=
      resolved_pass.current_persistent_data().merge_state ==
          PersistentPassData::kSomeTimesMerged ||
      resolved_pass.previous_persistent_data().merge_state ==
          PersistentPassData::kSomeTimesMerged;

  return change_in_merged_pass;
// Decides whether |dest_pass| must be redrawn based on how the clip rect from
// the parent chain changed and on merge-state transitions.
void UpdateNeedsRedraw(
    ResolvedPassData& resolved_pass,
    AggregatedRenderPass* dest_pass,
    const absl::optional<gfx::Rect> dest_root_target_clip_rect) {
  // |dest_root_target_clip_rect| is the bounding box on the root surface where
  // this render pass can be rendered into. It includes all ancestors' render
  // pass output rects, RenderPassDrawQuad rect, SurfaceDrawQuad rect, and clip
  DCHECK(dest_root_target_clip_rect.has_value());

  // Save the parent_clip_rect from the current frame.
  auto& current_parent_clip_rect =
      resolved_pass.current_persistent_data().parent_clip_rect;
  current_parent_clip_rect.Union(dest_root_target_clip_rect.value());

  // Get the parent_clip_rect from the previous frame.
  auto& previous_parent_clip_rect =
      resolved_pass.previous_persistent_data().parent_clip_rect;

  // If the parent clip rect expands, the new area of the render pass output
  // buffer has never been updated. Redraw is needed.
  bool parent_clip_rect_expands =
      !previous_parent_clip_rect.Contains(current_parent_clip_rect);

  // Whether the render pass is merged with its parent render pass and changes.
  bool change_in_merged_pass = ChangeInMergeState(resolved_pass);

  // 1. Needs redraw when the current parent clip rect expands from the
  // 2. Needs full damage and redraw when it switched from merged to
  // 3. Needs full damage and redraw when it is in_copy_request_pass.
  if (parent_clip_rect_expands ||
      resolved_pass.aggregation().in_copy_request_pass ||
      change_in_merged_pass) {
    dest_pass->has_damage_from_contributing_content = true;
// Returns true when |resolved_pass| must be treated as fully damaged rather
// than partially damaged. The criteria differ slightly depending on whether
// the kAllowUndamagedNonrootRenderPassToSkip feature is enabled.
bool RenderPassNeedsFullDamage(ResolvedPassData& resolved_pass) {
  auto& aggregation = resolved_pass.aggregation();

  const bool can_skip_render_pass = base::FeatureList::IsEnabled(
      features::kAllowUndamagedNonrootRenderPassToSkip);
  if (can_skip_render_pass) {
    // Needs full damage when
    // 1. The render pass pixels will be saved, either by a copy request or into
    // a cached render pass. This avoids a partially drawn render pass being
    // 2. A render pass is merged to its parent render pass in the previous
    // frame but it's not in this frame.
    return aggregation.in_cached_render_pass ||
           aggregation.in_copy_request_pass ||
           aggregation.in_pixel_moving_filter_pass ||
           ChangeInMergeState(resolved_pass);

  // Returns true if |resolved_pass| needs full damage. This is because:
  // 1. The render pass pixels will be saved, either by a copy request or into
  // a cached render pass. This avoids a partially drawn render pass being
  // 2. The render pass pixels will have a pixel moving foreground filter
  // applied to them. In this case pixels outside the damage_rect can be
  // moved inside the damage_rect by the filter.
  return aggregation.in_cached_render_pass ||
         aggregation.in_copy_request_pass ||
         aggregation.in_pixel_moving_filter_pass;
// Computes an enclosing rect in target render pass coordinate space that bounds
// where |quad| may contribute pixels. This rect is computed by transforming the
// quads |visible_rect|, which is known to be contained by the quads |rect|, and
// transforming it into target render pass coordinate space. The rect is then
// clipped by SharedQuadState |clip_rect| if one exists.
//
// Since a quad can only damage pixels it can draw to, the drawable rect is also
// the maximum damage rect a quad can contribute (ignoring pixel-moving
gfx::Rect ComputeDrawableRectForQuad(const DrawQuad* quad) {
  const SharedQuadState* sqs = quad->shared_quad_state;

  gfx::Rect drawable_rect = cc::MathUtil::MapEnclosingClippedRect(
      sqs->quad_to_target_transform, quad->visible_rect);
    drawable_rect.Intersect(*sqs->clip_rect);

  return drawable_rect;
// This function transforms a rect from its target space to the destination
// root target space. If clip_rect is valid, clipping is applied after
// the transform.
gfx::Rect TransformRectToDestRootTargetSpace(
    const gfx::Rect& rect_in_target_space,
    const gfx::Transform& target_to_dest_transform,
    const gfx::Transform& dest_to_root_target_transform,
    const absl::optional<gfx::Rect> dest_root_target_clip_rect) {
  // Compose the two hops (target -> dest -> root) into one transform.
  gfx::Transform target_to_dest_root_target_transform =
      dest_to_root_target_transform * target_to_dest_transform;

  gfx::Rect rect_in_root_target_space = cc::MathUtil::MapEnclosingClippedRect(
      target_to_dest_root_target_transform, rect_in_target_space);

  if (dest_root_target_clip_rect) {
    rect_in_root_target_space.Intersect(*dest_root_target_clip_rect);

  return rect_in_root_target_space;
// Out-of-class definitions for the histogram time bounds declared in the
// header (required for ODR-use of static constexpr members pre-C++17).
constexpr base::TimeDelta SurfaceAggregator::kHistogramMinTime;
constexpr base::TimeDelta SurfaceAggregator::kHistogramMaxTime;
// Aggregated results of the pre-aggregation walk over the surface tree.
struct SurfaceAggregator::PrewalkResult {
  // This is the set of Surfaces that were referenced by another Surface, but
  // not included in a SurfaceDrawQuad.
  base::flat_set<SurfaceId> undrawn_surfaces;
  // True when the set of contributing frame sinks changed this aggregation.
  bool frame_sinks_changed = false;
  // True when any contributing frame reported fullscreen-page mode.
  bool page_fullscreen_mode = false;
#if BUILDFLAG(IS_EFL)
  // EFL-only: set when the flush step can be skipped.
  bool can_skip_flush = false;
  // Widest color usage required by any contributing content; sRGB by default.
  gfx::ContentColorUsage content_color_usage = gfx::ContentColorUsage::kSRGB;
// Constructs the aggregator and registers it as an observer of |manager|.
// |aggregate_only_damaged| restricts aggregation to damaged content;
// |needs_surface_damage_rect_list| enables per-surface damage tracking used
// for overlay processing.
SurfaceAggregator::SurfaceAggregator(
    SurfaceManager* manager,
    DisplayResourceProvider* provider,
    bool aggregate_only_damaged,
    bool needs_surface_damage_rect_list,
    ExtraPassForReadbackOption extra_pass_option)
      aggregate_only_damaged_(aggregate_only_damaged),
      needs_surface_damage_rect_list_(needs_surface_damage_rect_list),
      extra_pass_for_readback_option_(extra_pass_option) {
  manager_->AddObserver(this);
// Unregisters from the SurfaceManager and notifies the client that all
// previously contained surfaces/frame sinks are gone.
SurfaceAggregator::~SurfaceAggregator() {
  manager_->RemoveObserver(this);

  contained_surfaces_.clear();
  contained_frame_sinks_.clear();

  // Notify client of all surfaces being removed.
  ProcessAddedAndRemovedSurfaces();
// This function is called at each render pass - CopyQuadsToPass().
// If |render_pass_quad|'s child pass has pixel-moving foreground or backdrop
// filters, computes the expanded damage in root target space and, when it
// intersects the root damage, appends it to |surface_damage_rect_list_|.
void SurfaceAggregator::AddRenderPassFilterDamageToDamageList(
    const ResolvedFrameData& resolved_frame,
    const CompositorRenderPassDrawQuad* render_pass_quad,
    const gfx::Transform& parent_target_transform,
    const absl::optional<gfx::Rect> dest_root_target_clip_rect,
    const gfx::Transform& dest_transform_to_root_target) {
  const CompositorRenderPassId child_pass_id = render_pass_quad->render_pass_id;
  const ResolvedPassData& child_resolved_pass =
      resolved_frame.GetRenderPassDataById(child_pass_id);
  const CompositorRenderPass& child_render_pass =
      child_resolved_pass.render_pass();

  // Add damages from render passes with pixel-moving foreground filters or
  // backdrop filters to the surface damage list.
  if (!child_render_pass.filters.HasFilterThatMovesPixels() &&
      !child_render_pass.backdrop_filters.HasFilterThatMovesPixels()) {

  gfx::Rect damage_rect = render_pass_quad->rect;
  gfx::Rect damage_rect_in_target_space;
  if (child_render_pass.filters.HasFilterThatMovesPixels()) {
    // The size of pixel-moving foreground filter is allowed to expand.
    // No intersecting shared_quad_state->clip_rect for the expanded rect.
    damage_rect_in_target_space =
        GetExpandedRectWithPixelMovingForegroundFilter(
            *render_pass_quad, child_render_pass.filters);
  } else if (child_render_pass.backdrop_filters.HasFilterThatMovesPixels()) {
    const auto* shared_quad_state = render_pass_quad->shared_quad_state;
    damage_rect_in_target_space = cc::MathUtil::MapEnclosingClippedRect(
        shared_quad_state->quad_to_target_transform, damage_rect);
    if (shared_quad_state->clip_rect) {
      damage_rect_in_target_space.Intersect(
          shared_quad_state->clip_rect.value());

  gfx::Rect damage_rect_in_root_target_space =
      TransformRectToDestRootTargetSpace(
          damage_rect_in_target_space, parent_target_transform,
          dest_transform_to_root_target, dest_root_target_clip_rect);

  // The whole render pass rect with pixel-moving foreground filters or
  // backdrop filters is considered damaged if it intersects with the other
  if (damage_rect_in_root_target_space.Intersects(root_damage_rect_)) {
    // Since |damage_rect_in_root_target_space| is available, just pass this
    // rect and reset the other arguments.
    AddSurfaceDamageToDamageList(
        damage_rect_in_root_target_space,
        /*parent_target_transform*/ gfx::Transform(),
        /*dest_root_target_clip_rect*/ {},
        /*dest_transform_to_root_target*/ gfx::Transform(),
        /*resolved_frame=*/nullptr);
// Appends one damage rect (in root target space) to |surface_damage_rect_list_|.
// When |resolved_frame| is null the caller supplies |default_damage_rect|
// directly (lost surface, or damage from a filtered render pass); otherwise
// the damage comes from the resolved frame itself.
void SurfaceAggregator::AddSurfaceDamageToDamageList(
    const gfx::Rect& default_damage_rect,
    const gfx::Transform& parent_target_transform,
    const absl::optional<gfx::Rect> dest_root_target_clip_rect,
    const gfx::Transform& dest_transform_to_root_target,
    ResolvedFrameData* resolved_frame) {
  gfx::Rect damage_rect;
  if (!resolved_frame) {
    // When the surface is null, it's either the surface is lost or it comes
    // from a render pass with filters.
    damage_rect = default_damage_rect;
    if (RenderPassNeedsFullDamage(resolved_frame->GetRootRenderPassData())) {
      damage_rect = resolved_frame->GetOutputRect();
      damage_rect = resolved_frame->GetSurfaceDamage();

  if (damage_rect.IsEmpty()) {
    // Empty damage is not recorded; remember that fact so a later overlay
    // association can add a placeholder entry (see FindQuadWithOverlayDamage).
    current_zero_damage_rect_is_not_recorded_ = true;
  current_zero_damage_rect_is_not_recorded_ = false;

  gfx::Rect damage_rect_in_root_target_space =
      TransformRectToDestRootTargetSpace(
          /*rect_in_target_space=*/damage_rect, parent_target_transform,
          dest_transform_to_root_target, dest_root_target_clip_rect);

  surface_damage_rect_list_->push_back(damage_rect_in_root_target_space);
// This function returns the overlay candidate quad ptr which has an
// overlay_damage_index pointing to its damage rect in
// surface_damage_rect_list_. |overlay_damage_index| will be saved in the shared
// quad state.
// This function is called at CopyQuadsToPass().
const DrawQuad* SurfaceAggregator::FindQuadWithOverlayDamage(
    const CompositorRenderPass& source_pass,
    AggregatedRenderPass* dest_pass,
    const gfx::Transform& parent_target_transform,
    const Surface* surface,
    size_t* overlay_damage_index) {
  // Only process the damage rect at the root render pass, once per surface.
  const CompositorFrame& frame = surface->GetActiveFrame();
  bool is_last_pass_on_src_surface =
      &source_pass == frame.render_pass_list.back().get();
  if (!is_last_pass_on_src_surface)

  // The occluding damage optimization currently relies on two things - there
  // can't be any damage above the quad within the surface, and the quad needs
  // its own SQS for the occluding_damage_rect metadata.
  const DrawQuad* target_quad = nullptr;
  for (auto* quad : source_pass.quad_list) {
    // Quads with |per_quad_damage| do not contribute to the |damage_rect| in
    // the |source_pass|. These quads are also assumed to have unique SQS
    if (source_pass.has_per_quad_damage) {
      auto optional_damage = GetOptionalDamageRectFromQuad(quad);
      if (optional_damage.has_value()) {

    if (target_quad == nullptr) {
      // More than one quad without per_quad_damage; no unique candidate.
      target_quad = nullptr;

  // No overlay candidate is found.

  // Surface damage for a render pass quad does not include damage from its
  // children. We skip this quad to avoid the incomplete damage association.
  if (target_quad->material == DrawQuad::Material::kCompositorRenderPass ||
      target_quad->material == DrawQuad::Material::kSurfaceContent)

  // Zero damage is not recorded in the surface_damage_rect_list_.
  // In this case, add an empty damage rect to the list so
  // |overlay_damage_index| can save this index.
  if (current_zero_damage_rect_is_not_recorded_) {
    current_zero_damage_rect_is_not_recorded_ = false;
    surface_damage_rect_list_->push_back(gfx::Rect());

  // Before assigning a surface damage rect to this quad, make sure that it is
  // not larger than the quad itself. This is possible when a quad is smaller
  // than it was last frame, or when it moves. The damage should be the size of
  // larger rect from last frame because we need to damage what's underneath the
  // quad. So if we promote the now smaller quad to an overlay this frame we
  // should not remove this damage rect. i.e. we should not assign the damage
  // rect to this quad.
  // For similar reasons, we should not assign damage to quads with non-axis
  // aligned transforms, because those won't be promoted to overlay.
  auto& damage_rect_in_target_space = surface_damage_rect_list_->back();
  if (!damage_rect_in_target_space.IsEmpty()) {
    gfx::Transform transform =
        parent_target_transform *
        target_quad->shared_quad_state->quad_to_target_transform;
    if (!transform.Preserves2dAxisAlignment()) {

    gfx::RectF rect_in_target_space =
        cc::MathUtil::MapClippedRect(transform, gfx::RectF(target_quad->rect));
    // Because OverlayCandidate.damage_rect is a gfx::Rect, we can't really
    // assign damage if the display_rect is not pixel-aligned.
    if (!gfx::IsNearestRectWithinDistance(rect_in_target_space, 0.01f)) {
    if (!rect_in_target_space.Contains(
            gfx::RectF(damage_rect_in_target_space))) {

  // The latest surface damage rect.
  *overlay_damage_index = surface_damage_rect_list_->size() - 1;
// A surface quad's root pass may be merged into the embedding pass only when
// the quad allows merging and is effectively opaque (opacity within
// kOpacityEpsilon of 1).
bool SurfaceAggregator::CanPotentiallyMergePass(
    const SurfaceDrawQuad& surface_quad) {
  const SharedQuadState* sqs = surface_quad.shared_quad_state;
  return surface_quad.allow_merge &&
         base::IsApproximatelyEqual(sqs->opacity, 1.f, kOpacityEpsilon);
// SurfaceObserver: drops cached resolved-frame data for a destroyed surface.
// Must not be called while an aggregation is in progress.
void SurfaceAggregator::OnSurfaceDestroyed(const SurfaceId& surface_id) {
  DCHECK(!is_inside_aggregate_);

  auto iter = resolved_frames_.find(surface_id);
  if (iter != resolved_frames_.end()) {
    TRACE_EVENT0("viz", "SurfaceAggregator::SurfaceDestroyed");
    resolved_frames_.erase(iter);
// Returns resolved frame data for |surface_id|, or null if unavailable.
// Only valid outside of an aggregation.
const ResolvedFrameData* SurfaceAggregator::GetLatestFrameData(
    const SurfaceId& surface_id) {
  DCHECK(!is_inside_aggregate_);
  return GetResolvedFrame(surface_id);
// Resolves a SurfaceRange to the latest in-flight surface's frame data.
// Returns null when no valid surface exists for |range|.
ResolvedFrameData* SurfaceAggregator::GetResolvedFrame(
    const SurfaceRange& range) {
  // Find latest in flight surface and cache that result for the duration of
  // this aggregation, then find ResolvedFrameData for that surface.
  auto iter = resolved_surface_ranges_.find(range);
  if (iter == resolved_surface_ranges_.end()) {
    auto* surface = manager_->GetLatestInFlightSurface(range);
    // Cache an invalid SurfaceId too, so the lookup is not repeated.
    SurfaceId surface_id = surface ? surface->surface_id() : SurfaceId();
    iter = resolved_surface_ranges_.emplace(range, surface_id).first;

  if (!iter->second.is_valid()) {
    // There is no surface for `range`.

  return GetResolvedFrame(iter->second);
// Returns (creating and/or refreshing as needed) the ResolvedFrameData for
// |surface_id|. Returns null when the surface is missing or has no active
// frame. During aggregation, marks the frame as used so it persists.
ResolvedFrameData* SurfaceAggregator::GetResolvedFrame(
    const SurfaceId& surface_id) {
  DCHECK(surface_id.is_valid());

  auto iter = resolved_frames_.find(surface_id);
  if (iter == resolved_frames_.end()) {
    auto* surface = manager_->GetSurfaceForId(surface_id);
    if (!surface || !surface->HasActiveFrame()) {
      // If there is no resolved surface or the surface has no active frame
      // there is no resolved frame data to return.

    AggregatedRenderPassId prev_root_pass_id;
    uint64_t prev_frame_index = 0u;
    // If this is the first frame in a new surface there might be damage
    // compared to the previous frame in a different surface.
    if (surface->surface_id() != surface->previous_frame_surface_id()) {
      auto prev_resolved_frame_iter =
          resolved_frames_.find(surface->previous_frame_surface_id());
      if (prev_resolved_frame_iter != resolved_frames_.end()) {
            prev_resolved_frame_iter->second.previous_frame_index();
            prev_resolved_frame_iter->second.GetRootRenderPassData()

    // Construct the ResolvedFrameData in place, seeded with the previous
    // surface's frame index/root pass id when available.
    iter = resolved_frames_
               std::piecewise_construct, std::forward_as_tuple(surface_id),
               std::forward_as_tuple(provider_, surface, prev_frame_index,

  ResolvedFrameData& resolved_frame = iter->second;
  Surface* surface = resolved_frame.surface();

  if (is_inside_aggregate_ && !resolved_frame.WasUsedInAggregation()) {
    // Mark the frame as used this aggregation so it persists.
    resolved_frame.MarkAsUsedInAggregation();

  // If there is a new CompositorFrame for `surface` compute resolved frame
  // data for the new resolved CompositorFrame.
  if (resolved_frame.previous_frame_index() !=
      surface->GetActiveFrameIndex()) {
    base::ElapsedTimer timer;
    ProcessResolvedFrame(resolved_frame);
    stats_->declare_resources_time += timer.Elapsed();

  return &resolved_frame;
// Emits the content for one SurfaceDrawQuad into |dest_pass|: the embedded
// surface's frame when available, gutter/fallback quads when the fallback
// differs from the primary, or a solid-color quad when no frame exists.
// Also records surface damage for overlay processing when enabled.
void SurfaceAggregator::HandleSurfaceQuad(
    const CompositorRenderPass& source_pass,
    const SurfaceDrawQuad* surface_quad,
    uint32_t embedder_client_namespace_id,
    float parent_device_scale_factor,
    const gfx::Transform& target_transform,
    const absl::optional<gfx::Rect> added_clip_rect,
    const absl::optional<gfx::Rect> dest_root_target_clip_rect,
    AggregatedRenderPass* dest_pass,
    bool ignore_undamaged,
    gfx::Rect* damage_rect_in_quad_space,
    bool* damage_rect_in_quad_space_valid,
    const MaskFilterInfoExt& mask_filter_info) {
  DCHECK(target_transform.Preserves2dAxisAlignment());

  SurfaceId primary_surface_id = surface_quad->surface_range.end();
  ResolvedFrameData* resolved_frame =
      GetResolvedFrame(surface_quad->surface_range);

  // |added_clip_rect| should be bounded by the output_rect of the render pass
  // that contains |surface_quad|.
  absl::optional<gfx::Rect> surface_clip_rect = CalculateClipRect(
      added_clip_rect, source_pass.output_rect, target_transform);

  // If a new surface is going to be emitted, add the surface_quad rect to
  // |surface_damage_rect_list_| for overlays. The whole quad is considered
  absl::optional<gfx::Rect> combined_clip_rect;

  gfx::Rect surface_in_target_space = ComputeDrawableRectForQuad(surface_quad);
  surface_in_target_space.Intersect(source_pass.output_rect);

  if (needs_surface_damage_rect_list_ &&
      (!resolved_frame || resolved_frame->surface_id() != primary_surface_id)) {
    // If using a fallback surface the surface content may be stretched or
    // have gutter. If the surface is missing the content will be filled
    // with a solid color. In both cases we no longer have frame-to-frame
    // damage so treat the entire SurfaceDrawQuad visible_rect as damaged.
    // |combined_clip_rect| is the transforming and clipping result of the
    // entire SurfaceDrawQuad visible_rect on the root target space of the
    AddSurfaceDamageToDamageList(surface_in_target_space, target_transform,
                                 dest_root_target_clip_rect,
                                 dest_pass->transform_to_root_target,
                                 /*resolved_frame=*/nullptr);

  // combined_clip_rect is the result of |dest_root_target_clip_rect|
  // intersecting |surface_quad| on the root target space of the root surface.
  combined_clip_rect = TransformRectToDestRootTargetSpace(
      /*rect_in_target_space=*/surface_in_target_space, target_transform,
      dest_pass->transform_to_root_target, dest_root_target_clip_rect);

  // If there's no fallback surface ID available, then simply emit a
  // SolidColorDrawQuad with the provided default background color. This
  // can happen after a Viz process crash.
  if (!resolved_frame) {
    EmitDefaultBackgroundColorQuad(surface_quad, embedder_client_namespace_id,
                                   target_transform, surface_clip_rect,
                                   dest_pass, mask_filter_info);

  if (resolved_frame->surface_id() != primary_surface_id &&
      !surface_quad->stretch_content_to_fill_bounds) {
    const CompositorFrame& fallback_frame =
        resolved_frame->surface()->GetActiveOrInterpolatedFrame();

    gfx::Rect fallback_rect(fallback_frame.size_in_pixels());

        // Scale the fallback content to the embedder's device scale factor.
        parent_device_scale_factor / fallback_frame.device_scale_factor();
        gfx::ScaleToEnclosingRect(fallback_rect, scale_ratio, scale_ratio);
        gfx::IntersectRects(fallback_rect, surface_quad->visible_rect);

    // TODO(crbug.com/1308932): CompositorFrameMetadata to SkColor4f
    EmitGutterQuadsIfNecessary(surface_quad->visible_rect, fallback_rect,
                               surface_quad->shared_quad_state,
                               embedder_client_namespace_id, target_transform,
                               fallback_frame.metadata.root_background_color,
                               dest_pass, mask_filter_info);

  EmitSurfaceContent(*resolved_frame, parent_device_scale_factor, surface_quad,
                     embedder_client_namespace_id, target_transform,
                     surface_clip_rect, combined_clip_rect, dest_pass,
                     ignore_undamaged, damage_rect_in_quad_space,
                     damage_rect_in_quad_space_valid, mask_filter_info);
// Emits the contents of the surface referenced by |surface_quad| into the
// aggregated frame. Non-root render passes of the embedded surface are copied
// into |dest_pass_list_|; the root pass is either merged directly into
// |dest_pass| or referenced via an AggregatedRenderPassDrawQuad. Contents are
// scaled by the parent/surface device-scale-factor ratio, or stretched to
// fill the quad bounds when |stretch_content_to_fill_bounds| is set.
845 void SurfaceAggregator::EmitSurfaceContent(
846 ResolvedFrameData& resolved_frame,
847 float parent_device_scale_factor,
848 const SurfaceDrawQuad* surface_quad,
849 uint32_t embedder_client_namespace_id,
850 const gfx::Transform& target_transform,
851 const absl::optional<gfx::Rect> added_clip_rect,
852 const absl::optional<gfx::Rect> dest_root_target_clip_rect,
853 AggregatedRenderPass* dest_pass,
854 bool ignore_undamaged,
855 gfx::Rect* damage_rect_in_quad_space,
856 bool* damage_rect_in_quad_space_valid,
857 const MaskFilterInfoExt& mask_filter_info) {
858 Surface* surface = resolved_frame.surface();
860 // If this surface's id is already in our referenced set then it creates
861 // a cycle in the graph and should be dropped.
862 SurfaceId surface_id = surface->surface_id();
863 if (referenced_surfaces_.count(surface_id))
866 ++stats_->copied_surface_count;
868 const CompositorFrame& frame = surface->GetActiveOrInterpolatedFrame();
870 // If we are stretching content to fill the SurfaceDrawQuad, or if the device
871 // scale factor mismatches between content and SurfaceDrawQuad, we appply an
873 float extra_content_scale_x, extra_content_scale_y;
874 if (surface_quad->stretch_content_to_fill_bounds) {
875 const gfx::Rect& surface_quad_rect = surface_quad->rect;
876 // Stretches the surface contents to exactly fill the layer bounds,
877 // regardless of scale or aspect ratio differences.
878 extra_content_scale_x = surface_quad_rect.width() /
879 static_cast<float>(frame.size_in_pixels().width());
880 extra_content_scale_y = surface_quad_rect.height() /
881 static_cast<float>(frame.size_in_pixels().height());
883 extra_content_scale_x = extra_content_scale_y =
884 parent_device_scale_factor / frame.device_scale_factor();
886 float inverse_extra_content_scale_x = SK_Scalar1 / extra_content_scale_x;
887 float inverse_extra_content_scale_y = SK_Scalar1 / extra_content_scale_y;
889 const SharedQuadState* surface_quad_sqs = surface_quad->shared_quad_state;
890 gfx::Transform scaled_quad_to_target_transform(
891 surface_quad_sqs->quad_to_target_transform);
892 scaled_quad_to_target_transform.Scale(extra_content_scale_x,
893 extra_content_scale_y);
// Typed trace event tying this surface's frame (via its BeginFrameAck trace
// id) into the display's "Graphics.Pipeline" flow for pipeline tracing.
896 "viz,benchmark,graphics.pipeline", "Graphics.Pipeline",
897 perfetto::TerminatingFlow::Global(
898 frame.metadata.begin_frame_ack.trace_id),
899 perfetto::Flow::Global(display_trace_id_),
900 [trace_id = display_trace_id_](perfetto::EventContext ctx) {
901 auto* event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
902 auto* data = event->set_chrome_graphics_pipeline();
903 data->set_step(perfetto::protos::pbzero::ChromeGraphicsPipeline::
904 StepName::STEP_SURFACE_AGGREGATION);
905 data->set_display_trace_id(trace_id);
908 const gfx::Rect& surface_quad_visible_rect = surface_quad->visible_rect;
// When aggregating only damaged content, compute the root damage in this
// quad's space and bail out early if the surface doesn't intersect it.
909 if (ignore_undamaged) {
910 gfx::Transform quad_to_target_transform =
911 target_transform * surface_quad_sqs->quad_to_target_transform;
912 *damage_rect_in_quad_space_valid = CalculateQuadSpaceDamageRect(
913 quad_to_target_transform, dest_pass->transform_to_root_target,
914 root_damage_rect_, damage_rect_in_quad_space);
915 if (*damage_rect_in_quad_space_valid &&
916 !damage_rect_in_quad_space->Intersects(surface_quad_visible_rect)) {
921 // A map keyed by RenderPass id.
922 Surface::CopyRequestsMap copy_requests;
923 if (take_copy_requests_)
// NOTE(review): "©_requests" below is mojibake — "&copy_requests" was
// HTML-entity-decoded ("&copy;" -> "©"). Restore the original "&" so this
// compiles; the call takes the address of |copy_requests|.
924 surface->TakeCopyOutputRequests(©_requests);
926 const CompositorRenderPassList& render_pass_list = frame.render_pass_list;
927 if (!resolved_frame.is_valid()) {
928 // As |copy_requests| goes out-of-scope, all copy requests in that container
929 // will auto-send an empty result upon destruction.
// Mark this surface as on the aggregation stack so nested SurfaceDrawQuads
// that reference it again are detected as cycles (see check above).
933 referenced_surfaces_.insert(surface_id);
935 gfx::Transform combined_transform = scaled_quad_to_target_transform;
936 combined_transform.PostConcat(target_transform);
938 // If the SurfaceDrawQuad is marked as being reflected and surface contents
939 // are going to be scaled then keep the RenderPass. This allows the reflected
940 // surface to be drawn with AA enabled for smooth scaling and preserves the
941 // original reflector scaling behaviour which scaled a TextureLayer.
942 bool reflected_and_scaled =
943 surface_quad->is_reflection &&
944 !scaled_quad_to_target_transform.IsIdentityOrTranslation();
946 const bool pass_is_mergeable =
947 CanPotentiallyMergePass(*surface_quad) && !reflected_and_scaled &&
948 combined_transform.Preserves2dAxisAlignment() &&
949 mask_filter_info.CanMergeMaskFilterInfo(*render_pass_list.back(),
952 // When a surface has video capture enabled, but no copy requests, we do not
953 // require an intermediate surface. However, video capture being enabled is a
954 // hint that we will have a copy request soon, so we prevent |merge_pass| to
955 // avoid thrashing on the render pass backing allocation.
956 const bool has_video_capture =
957 !copy_requests.empty() || surface->IsVideoCaptureOnFromClient();
959 const bool merge_pass = pass_is_mergeable && !has_video_capture;
961 // Update PersistentPassData.merge_status of the root render pass of the
962 // current frame before making a call to AddSurfaceDamageToDamageList() where
963 // RenderPassNeedsFullDamage() is called and needs root pass |merge_state|
965 UpdatePersistentPassDataMergeState(resolved_frame.GetRootRenderPassData(),
966 dest_pass, merge_pass);
968 if (needs_surface_damage_rect_list_ && resolved_frame.WillDraw()) {
969 AddSurfaceDamageToDamageList(
970 /*default_damage_rect =*/gfx::Rect(), combined_transform,
971 dest_root_target_clip_rect, dest_pass->transform_to_root_target,
975 if (frame.metadata.delegated_ink_metadata) {
976 // Copy delegated ink metadata from the compositor frame metadata. This
977 // prevents the delegated ink trail from flickering if a compositor frame
978 // is not generated due to a delayed main frame.
979 TransformAndStoreDelegatedInkMetadata(
980 dest_pass->transform_to_root_target * combined_transform,
981 frame.metadata.delegated_ink_metadata.get());
984 // TODO(fsamuel): Move this to a separate helper function.
985 auto& resolved_passes = resolved_frame.GetResolvedPasses();
986 size_t num_render_passes = resolved_passes.size();
// When merging, the embedded root pass is not copied here; its quads are
// merged straight into |dest_pass| further below.
987 size_t passes_to_copy =
988 merge_pass ? num_render_passes - 1 : num_render_passes;
989 for (size_t j = 0; j < passes_to_copy; ++j) {
990 ResolvedPassData& resolved_pass = resolved_passes[j];
991 const CompositorRenderPass& source = resolved_pass.render_pass();
993 size_t sqs_size = source.shared_quad_state_list.size();
994 size_t dq_size = source.quad_list.size();
995 auto copy_pass = std::make_unique<AggregatedRenderPass>(sqs_size, dq_size);
// Clamp the copied pass's backing size to |max_render_target_size_| when a
// limit is configured (0 means unlimited).
997 gfx::Rect output_rect = source.output_rect;
998 if (max_render_target_size_ > 0) {
999 output_rect.set_width(
1000 std::min(output_rect.width(), max_render_target_size_));
1001 output_rect.set_height(
1002 std::min(output_rect.height(), max_render_target_size_));
1005 resolved_pass.remapped_id(), output_rect, output_rect,
1006 source.transform_to_root_target, source.filters,
1007 source.backdrop_filters, source.backdrop_filter_bounds,
1008 root_content_color_usage_, source.has_transparent_background,
1009 #if defined(TIZEN_VIDEO_HOLE)
1010 source.has_video_hole,
1012 source.cache_render_pass, resolved_pass.aggregation().has_damage,
1013 source.generate_mipmap);
1015 UpdatePersistentPassDataMergeState(resolved_pass, copy_pass.get(),
1016 /*is_merged_pass=*/false);
// Move any copy requests targeting this pass id onto the copied pass.
// NOTE(review): "©_requests" / "©_pass" are mojibake for
// "&copy_requests" / "&copy_pass" (HTML "&copy;" decoded to "©") — restore
// the "&" address-of operators.
1018 MoveMatchingRequests(source.id, ©_requests, ©_pass->copy_requests);
1020 // Contributing passes aggregated in to the pass list need to take the
1021 // transform of the surface quad into account to update their transform to
1022 // the root surface.
1023 copy_pass->transform_to_root_target.PostConcat(combined_transform);
1024 copy_pass->transform_to_root_target.PostConcat(
1025 dest_pass->transform_to_root_target);
1027 CopyQuadsToPass(resolved_frame, resolved_pass, copy_pass.get(),
1028 frame.device_scale_factor(), gfx::Transform(), {},
1029 dest_root_target_clip_rect, surface, MaskFilterInfoExt());
1031 SetRenderPassDamageRect(copy_pass.get(), resolved_pass);
1033 dest_pass_list_->push_back(std::move(copy_pass));
// Propagate the video-capture hint onto the most recently emitted pass so
// later stages keep a backing for it.
1036 if (surface->IsVideoCaptureOnFromClient()) {
1038 dest_pass_list_->back()->video_capture_enabled = true;
1041 const auto& last_pass = *render_pass_list.back();
1042 auto& resolved_root_pass = resolved_frame.GetRootRenderPassData();
1044 // This hack allows for quads that require overlay to appear in a render pass
1045 // for a copy request as well as be merged into the dest pass (eventually the
1046 // root) to be promoted to overlay. This allows e.g. protected content to be
1047 // visible to the user, even if something is capturing the tab (the protected
1048 // content will still not appear in the capture). Note this does not handle
1049 // the case when the root pass is captured with protected content, which needs
1050 // to be handled during overlay processing.
1052 // It works by preventing merging when there is a copy request (as usual), so
1053 // we have an intermediate render pass (and backing) that can service the copy
1054 // request. Then, we detect here if the render pass has quads that require
1055 // overlay and could've otherwise merged. If so, we force a merge, resulting
1056 // in a copy of the render pass quads in the intermediate pass and a copy in
1057 // the dest pass. Since we are not copying the copy request itself to the dest
1058 // pass, the quads that require overlay can still be promoted to overlay.
1059 const bool allow_forced_merge_pass = base::FeatureList::IsEnabled(
1060 features::kAllowForceMergeRenderPassWithRequireOverlayQuads);
1061 const bool force_merge_pass =
1062 allow_forced_merge_pass && !merge_pass && pass_is_mergeable &&
1063 base::ranges::any_of(dest_pass_list_->back()->quad_list,
1064 &OverlayCandidate::RequiresOverlay);
1066 if (merge_pass || force_merge_pass) {
1067 // Compute a clip rect in |dest_pass| coordinate space to ensure merged
1068 // surface cannot draw outside where a non-merged surface would draw. An
1069 // enclosing rect in |surface_quad| target render pass coordinate space is
1070 // computed, then transformed into |dest_pass| coordinate space and finally
1071 // that is intersected with existing |added_clip_rect|.
1072 absl::optional<gfx::Rect> surface_quad_clip = CalculateClipRect(
1073 added_clip_rect, ComputeDrawableRectForQuad(surface_quad),
1076 // UpdatePersistentPassDataMergeState() has been called earlier.
1077 CopyQuadsToPass(resolved_frame, resolved_root_pass, dest_pass,
1078 frame.device_scale_factor(), combined_transform,
1079 surface_quad_clip, dest_root_target_clip_rect, surface,
// Not merging: reference the copied root pass from |dest_pass| through an
// AggregatedRenderPassDrawQuad. The SQS rects are scaled back into the
// embedded pass's content space using the inverse content scale.
1082 auto* shared_quad_state = CopyAndScaleSharedQuadState(
1083 surface_quad_sqs, embedder_client_namespace_id,
1084 scaled_quad_to_target_transform, target_transform,
1085 gfx::ScaleToEnclosingRect(surface_quad_sqs->quad_layer_rect,
1086 inverse_extra_content_scale_x,
1087 inverse_extra_content_scale_y),
1088 gfx::ScaleToEnclosingRect(surface_quad_sqs->visible_quad_layer_rect,
1089 inverse_extra_content_scale_x,
1090 inverse_extra_content_scale_y),
1091 added_clip_rect, mask_filter_info, dest_pass);
1093 // At this point, we need to calculate three values in order to construct
1094 // the CompositorRenderPassDrawQuad:
1096 // |quad_rect| - A rectangle representing the RenderPass's output area in
1097 // content space. This is equal to the root render pass (|last_pass|)
1099 gfx::Rect quad_rect = last_pass.output_rect;
1101 // |quad_visible_rect| - A rectangle representing the visible portion of
1102 // the RenderPass, in content space. As the SurfaceDrawQuad being
1103 // embedded may be clipped further than its root render pass, we use the
1104 // surface quad's value - |source_visible_rect|.
1106 // There may be an |extra_content_scale_x| applied when going from this
1107 // render pass's content space to the surface's content space, we remove
1108 // this so that |quad_visible_rect| is in the render pass's content
1110 gfx::Rect quad_visible_rect(gfx::ScaleToEnclosingRect(
1111 surface_quad_visible_rect, inverse_extra_content_scale_x,
1112 inverse_extra_content_scale_y));
1114 // |tex_coord_rect| - A rectangle representing the bounds of the texture
1115 // in the RenderPass's |quad_rect|. Not in content space, instead as an
1116 // offset within |quad_rect|.
1117 gfx::RectF tex_coord_rect = gfx::RectF(gfx::SizeF(quad_rect.size()));
1119 // We can't produce content outside of |quad_rect|, so clip the visible
1120 // rect if necessary.
1121 quad_visible_rect.Intersect(quad_rect);
1122 auto remapped_pass_id = resolved_root_pass.remapped_id();
// Nothing of the embedded root pass is visible, so drop the pass that was
// copied into |dest_pass_list_| above instead of emitting a quad for it.
1123 if (quad_visible_rect.IsEmpty()) {
1124 base::EraseIf(*dest_pass_list_,
1125 [&remapped_pass_id](
1126 const std::unique_ptr<AggregatedRenderPass>& pass) {
1127 return pass->id == remapped_pass_id;
1131 dest_pass->CreateAndAppendDrawQuad<AggregatedRenderPassDrawQuad>();
1132 quad->SetNew(shared_quad_state, quad_rect, quad_visible_rect,
1133 remapped_pass_id, kInvalidResourceId, gfx::RectF(),
1134 gfx::Size(), gfx::Vector2dF(1.0f, 1.0f), gfx::PointF(),
1136 /*force_anti_aliasing_off=*/false,
1137 /* backdrop_filter_quality*/ 1.0f);
// Pop this surface off the aggregation stack; it may be legitimately
// referenced again by sibling quads.
1141 referenced_surfaces_.erase(surface_id);
// Emits a SolidColorDrawQuad covering |surface_quad|'s rect using the quad's
// default background color. Used when the referenced surface has no resolved
// frame (e.g. after a Viz process crash), so there is nothing to embed.
1144 void SurfaceAggregator::EmitDefaultBackgroundColorQuad(
1145 const SurfaceDrawQuad* surface_quad,
1146 uint32_t embedder_client_namespace_id,
1147 const gfx::Transform& target_transform,
1148 const absl::optional<gfx::Rect> clip_rect,
1149 AggregatedRenderPass* dest_pass,
1150 const MaskFilterInfoExt& mask_filter_info) {
1151 TRACE_EVENT1("viz", "SurfaceAggregator::EmitDefaultBackgroundColorQuad",
1152 "surface_range", surface_quad->surface_range.ToString());
1154 // No matching surface was found so create a SolidColorDrawQuad with the
1155 // SurfaceDrawQuad default background color.
1156 SkColor4f background_color = surface_quad->default_background_color;
// Copy the embedder's SharedQuadState into |dest_pass| so the solid color
// quad inherits the surface quad's transform, clip and mask filter.
1157 auto* shared_quad_state = CopySharedQuadState(
1158 surface_quad->shared_quad_state, embedder_client_namespace_id,
1159 target_transform, clip_rect, mask_filter_info, dest_pass);
1161 auto* solid_color_quad =
1162 dest_pass->CreateAndAppendDrawQuad<SolidColorDrawQuad>();
1163 solid_color_quad->SetNew(shared_quad_state, surface_quad->rect,
1164 surface_quad->visible_rect, background_color, false);
// Fills the portion of |primary_rect| not covered by |fallback_rect| with
// solid-color "gutter" quads in |background_color|: a right gutter (which
// also covers the bottom-right corner) and a bottom gutter. Emits nothing
// when the background is fully transparent.
1167 void SurfaceAggregator::EmitGutterQuadsIfNecessary(
1168 const gfx::Rect& primary_rect,
1169 const gfx::Rect& fallback_rect,
1170 const SharedQuadState* primary_shared_quad_state,
1171 uint32_t embedder_client_namespace_id,
1172 const gfx::Transform& target_transform,
1173 const absl::optional<gfx::Rect> clip_rect,
1174 SkColor4f background_color,
1175 AggregatedRenderPass* dest_pass,
1176 const MaskFilterInfoExt& mask_filter_info) {
1177 bool has_transparent_background = background_color == SkColors::kTransparent;
1179 // If the fallback Surface's active CompositorFrame has a non-transparent
1180 // background then compute gutter.
1181 if (has_transparent_background)
// Right gutter: the strip between the fallback's right edge and the
// primary's right edge, spanning the primary's full height.
1184 if (fallback_rect.width() < primary_rect.width()) {
1185 // The right gutter also includes the bottom-right corner, if necessary.
1186 gfx::Rect right_gutter_rect(fallback_rect.right(), primary_rect.y(),
1187 primary_rect.width() - fallback_rect.width(),
1188 primary_rect.height());
1190 SharedQuadState* shared_quad_state = CopyAndScaleSharedQuadState(
1191 primary_shared_quad_state, embedder_client_namespace_id,
1192 primary_shared_quad_state->quad_to_target_transform, target_transform,
1193 right_gutter_rect, right_gutter_rect, clip_rect, mask_filter_info,
1196 auto* right_gutter =
1197 dest_pass->CreateAndAppendDrawQuad<SolidColorDrawQuad>();
1198 right_gutter->SetNew(shared_quad_state, right_gutter_rect,
1199 right_gutter_rect, background_color, false);
// Bottom gutter: the strip below the fallback, only as wide as the
// fallback (the corner is already covered by the right gutter).
1202 if (fallback_rect.height() < primary_rect.height()) {
1203 gfx::Rect bottom_gutter_rect(
1204 primary_rect.x(), fallback_rect.bottom(), fallback_rect.width(),
1205 primary_rect.height() - fallback_rect.height());
1207 SharedQuadState* shared_quad_state = CopyAndScaleSharedQuadState(
1208 primary_shared_quad_state, embedder_client_namespace_id,
1209 primary_shared_quad_state->quad_to_target_transform, target_transform,
1210 bottom_gutter_rect, bottom_gutter_rect, clip_rect, mask_filter_info,
1213 auto* bottom_gutter =
1214 dest_pass->CreateAndAppendDrawQuad<SolidColorDrawQuad>();
1215 bottom_gutter->SetNew(shared_quad_state, bottom_gutter_rect,
1216 bottom_gutter_rect, background_color, false);
// Appends an extra render pass that performs color conversion when the
// display's output color space is unsuitable for blending and the root pass
// requires blending. The previous root pass becomes an intermediate pass
// referenced by a single AggregatedRenderPassDrawQuad in the new root.
1220 void SurfaceAggregator::AddColorConversionPass() {
1221 auto* root_render_pass = dest_pass_list_->back().get();
1222 gfx::Rect output_rect = root_render_pass->output_rect;
1224 // An extra color conversion pass is only done if the display's color
1225 // space is unsuitable as a blending color space and the root render pass
1226 // requires blending.
1227 bool needs_color_conversion_pass =
1228 !display_color_spaces_
1229 .GetOutputColorSpace(root_render_pass->content_color_usage,
1230 root_render_pass->has_transparent_background)
1231 .IsSuitableForBlending();
1232 needs_color_conversion_pass &= root_render_pass->ShouldDrawWithBlending();
1234 // If we added or removed the color conversion pass, we need to add full
1235 // damage to the current-root renderpass (and also the new-root renderpass,
1236 // if the current-root renderpass becomes and intermediate renderpass).
1237 if (needs_color_conversion_pass != last_frame_had_color_conversion_pass_)
1238 root_render_pass->damage_rect = output_rect;
1240 last_frame_had_color_conversion_pass_ = needs_color_conversion_pass;
1241 if (!needs_color_conversion_pass)
// The current root must not carry a non-identity root transform, since the
// new pass assumes it embeds the root directly.
1243 CHECK(root_render_pass->transform_to_root_target == gfx::Transform());
// Lazily allocate a stable id so the conversion pass's backing can be
// reused across frames.
1245 if (!color_conversion_render_pass_id_) {
1246 color_conversion_render_pass_id_ =
1247 render_pass_id_generator_.GenerateNextId();
// kSrc blend mode: the conversion pass fully replaces its destination.
1250 AddRenderPassHelper(color_conversion_render_pass_id_, output_rect,
1251 root_render_pass->damage_rect, root_content_color_usage_,
1252 root_render_pass->has_transparent_background,
1253 #if defined(TIZEN_VIDEO_HOLE)
1254 root_render_pass->has_video_hole,
1256 /*pass_is_color_conversion_pass=*/true,
1257 /*quad_state_to_target_transform=*/gfx::Transform(),
1258 /*quad_state_contents_opaque=*/false, SkBlendMode::kSrc,
1259 root_render_pass->id);
// Appends an extra pass on top of the root so the (previous) root pass has a
// readable backing. Needed when a pass drawing into the root has a backdrop
// filter (which must read back what's beneath it), or unconditionally when
// |extra_pass_for_readback_option_| is kAlwaysAddPass.
1262 void SurfaceAggregator::AddRootReadbackPass() {
1263 if (extra_pass_for_readback_option_ == ExtraPassForReadbackOption::kNone) {
1267 auto* root_render_pass = dest_pass_list_->back().get();
1268 gfx::Rect output_rect = root_render_pass->output_rect;
1269 CHECK(root_render_pass->transform_to_root_target == gfx::Transform());
1270 bool needs_readback_pass = false;
1271 // Check if there are any render passes that draw into the root pass with
1272 // a backdrop filter.
1273 base::flat_set<AggregatedRenderPassId> pass_ids_drawing_to_root;
1274 for (auto* quad : root_render_pass->quad_list) {
1275 if (auto* render_pass_quad =
1276 quad->DynamicCast<AggregatedRenderPassDrawQuad>()) {
1277 pass_ids_drawing_to_root.insert(render_pass_quad->render_pass_id);
1280 if (!pass_ids_drawing_to_root.empty()) {
1281 for (auto& render_pass : *dest_pass_list_) {
1282 if (!pass_ids_drawing_to_root.contains(render_pass->id))
1284 if (!render_pass->backdrop_filters.IsEmpty()) {
1285 needs_readback_pass = true;
1291 if (extra_pass_for_readback_option_ ==
1292 ExtraPassForReadbackOption::kAlwaysAddPass) {
1293 needs_readback_pass = true;
// Adding/removing the readback pass changes pass structure, so force full
// damage on the current root (mirrors AddColorConversionPass()).
1296 if (needs_readback_pass != last_frame_had_readback_pass_)
1297 root_render_pass->damage_rect = output_rect;
1299 last_frame_had_readback_pass_ = needs_readback_pass;
1300 if (!last_frame_had_readback_pass_)
// Lazily allocate a stable pass id, reused across frames.
1303 if (!readback_render_pass_id_) {
1304 readback_render_pass_id_ = render_pass_id_generator_.GenerateNextId();
1307 // Ensure the root-that's-non-root pass is cleared to fully transparent first.
1308 bool has_transparent_background =
1309 root_render_pass->has_transparent_background;
1310 root_render_pass->has_transparent_background = true;
1311 AddRenderPassHelper(readback_render_pass_id_, output_rect,
1312 root_render_pass->damage_rect, root_content_color_usage_,
1313 has_transparent_background,
1314 #if defined(TIZEN_VIDEO_HOLE)
1315 root_render_pass->has_video_hole,
1317 /*pass_is_color_conversion_pass=*/false,
1318 /*quad_state_to_target_transform=*/gfx::Transform(),
1319 /*quad_state_contents_opaque=*/false,
1320 SkBlendMode::kSrcOver, root_render_pass->id);
// Appends an extra pass that applies |root_surface_transform_| (the display's
// hardware rotation/transform) on top of the current root pass, whose output
// and damage rects are mapped through that transform for the new root.
1323 void SurfaceAggregator::AddDisplayTransformPass() {
1324 if (dest_pass_list_->empty())
1327 auto* root_render_pass = dest_pass_list_->back().get();
1328 DCHECK(root_render_pass->transform_to_root_target == root_surface_transform_);
// Lazily allocate a stable pass id, reused across frames.
1330 if (!display_transform_render_pass_id_) {
1331 display_transform_render_pass_id_ =
1332 render_pass_id_generator_.GenerateNextId();
// The embedding quad can only be marked opaque if every SQS in the current
// root pass reports opaque contents.
1335 bool are_contents_opaque = true;
1336 for (const auto* sqs : root_render_pass->shared_quad_state_list) {
1337 if (!sqs->are_contents_opaque) {
1338 are_contents_opaque = false;
1343 AddRenderPassHelper(
1344 display_transform_render_pass_id_,
1345 cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
1346 root_surface_transform_, root_render_pass->output_rect),
1347 cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
1348 root_surface_transform_, root_render_pass->damage_rect),
1349 root_render_pass->content_color_usage,
1350 root_render_pass->has_transparent_background,
1351 #if defined(TIZEN_VIDEO_HOLE)
1352 root_render_pass->has_video_hole,
1354 /*pass_is_color_conversion_pass=*/false, root_surface_transform_,
1355 are_contents_opaque, SkBlendMode::kSrcOver, root_render_pass->id);
// Shared helper for AddColorConversionPass / AddRootReadbackPass /
// AddDisplayTransformPass: appends a new render pass to |dest_pass_list_|
// containing exactly one SharedQuadState and one
// AggregatedRenderPassDrawQuad that embeds |quad_pass_id| (the previous root
// pass), making the new pass the root of the aggregated frame.
1358 void SurfaceAggregator::AddRenderPassHelper(
1359 AggregatedRenderPassId render_pass_id,
1360 const gfx::Rect& render_pass_output_rect,
1361 const gfx::Rect& render_pass_damage_rect,
1362 gfx::ContentColorUsage pass_color_usage,
1363 bool pass_has_transparent_background,
1364 #if defined(TIZEN_VIDEO_HOLE)
1365 bool pass_is_video_hole,
1367 bool pass_is_color_conversion_pass,
1368 const gfx::Transform& quad_state_to_target_transform,
1369 bool quad_state_contents_opaque,
1370 SkBlendMode quad_state_blend_mode,
1371 AggregatedRenderPassId quad_pass_id) {
// The embedded quad covers the current root pass's full output rect.
1372 gfx::Rect current_output_rect = dest_pass_list_->back()->output_rect;
// Reserve room for exactly one SQS and one quad.
1374 auto render_pass = std::make_unique<AggregatedRenderPass>(1, 1);
1375 render_pass->SetAll(render_pass_id, render_pass_output_rect,
1376 render_pass_damage_rect, gfx::Transform(),
1377 /*filters=*/cc::FilterOperations(),
1378 /*backdrop_filters=*/cc::FilterOperations(),
1379 /*backdrop_filter_bounds=*/gfx::RRectF(),
1380 pass_color_usage, pass_has_transparent_background,
1381 #if defined(TIZEN_VIDEO_HOLE)
1384 /*cache_render_pass=*/false,
1385 /*has_damage_from_contributing_content=*/false,
1386 /*generate_mipmap=*/false);
1387 render_pass->is_color_conversion_pass = pass_is_color_conversion_pass;
1389 auto* shared_quad_state = render_pass->CreateAndAppendSharedQuadState();
1390 shared_quad_state->SetAll(
1391 quad_state_to_target_transform,
1392 /*layer_rect=*/current_output_rect,
1393 /*visible_layer_rect=*/current_output_rect, gfx::MaskFilterInfo(),
1394 /*clip=*/absl::nullopt, quad_state_contents_opaque, /*opacity_f=*/1.f,
1395 quad_state_blend_mode, /*sorting_context=*/0, /*layer_id*/ 0u,
1396 /*fast_rounded_corner=*/false);
1399 render_pass->CreateAndAppendDrawQuad<AggregatedRenderPassDrawQuad>();
1400 quad->SetNew(shared_quad_state, current_output_rect, current_output_rect,
1401 quad_pass_id, kInvalidResourceId, gfx::RectF(), gfx::Size(),
1402 gfx::Vector2dF(1.0f, 1.0f), gfx::PointF(),
1403 gfx::RectF(current_output_rect),
1404 /*force_anti_aliasing_off=*/false,
1405 /*backdrop_filter_quality*/ 1.0f);
// The new pass becomes the last (root) pass of the aggregated frame.
1406 dest_pass_list_->push_back(std::move(render_pass));
// Copies every quad of |resolved_pass| (one render pass of the surface's
// compositor frame) into |dest_pass|: remaps resources and render pass ids,
// applies |target_transform| and |clip_rect| via copied SharedQuadStates,
// recurses through HandleSurfaceQuad for embedded SurfaceDrawQuads, and
// optionally drops quads outside the current damage (|ignore_undamaged|).
1409 void SurfaceAggregator::CopyQuadsToPass(
1410 ResolvedFrameData& resolved_frame,
1411 ResolvedPassData& resolved_pass,
1412 AggregatedRenderPass* dest_pass,
1413 float parent_device_scale_factor,
1414 const gfx::Transform& target_transform,
1415 const absl::optional<gfx::Rect> clip_rect,
1416 const absl::optional<gfx::Rect> dest_root_target_clip_rect,
1417 const Surface* surface,
1418 const MaskFilterInfoExt& parent_mask_filter_info_ext) {
1419 const CompositorRenderPass& source_pass = resolved_pass.render_pass();
1420 const QuadList& source_quad_list = source_pass.quad_list;
1421 const SharedQuadState* last_copied_source_shared_quad_state = nullptr;
1423 #if defined(TIZEN_VIDEO_HOLE)
1424 // We have this flag for checking need to disable blend mode in GLRenderer
1425 // We need to pass this flag to AggregatedRenderPass as it was created by
1427 if (source_pass.has_video_hole) {
1428 dest_pass->has_video_hole = true;
1432 // If the current frame has copy requests or cached render passes, then
1433 // aggregate the entire thing, as otherwise parts of the copy requests may be
1434 // ignored and we could cache partially drawn render pass.
1435 // If there are pixel-moving backdrop filters then the damage rect might be
1436 // expanded later, so we can't drop quads that are outside the current damage
1438 // If overlay/underlay is enabled then the underlay rect might be added to the
1439 // damage rect later. We are not able to predict right here which draw quad
1440 // candidate will be promoted to overlay/underlay. Also, we might drop quads
1441 // which are on top of an underlay and cause the overlay processor to
1442 // present the quad as an overlay instead of an underlay.
1443 const bool ignore_undamaged =
1444 aggregate_only_damaged_ && !has_copy_requests_ &&
1445 !has_pixel_moving_backdrop_filter_ &&
1446 !resolved_pass.aggregation().in_cached_render_pass &&
1447 !resolved_pass.aggregation().in_pixel_moving_filter_pass &&
1448 !RenderPassNeedsFullDamage(resolved_pass);
1449 // TODO(kylechar): For copy render passes we only need to draw all quads if
1450 // those attributes are set on the current render pass' aggregation data. The
1451 // complication is if a SurfaceDrawQuad is dropped and that surface has a copy
1452 // request on it then we still need to draw the surface.
1454 // Damage rect in the quad space of the current shared quad state.
1455 // TODO(jbauman): This rect may contain unnecessary area if
1456 // transform isn't axis-aligned.
1457 gfx::Rect damage_rect_in_quad_space;
1458 bool damage_rect_in_quad_space_valid = false;
1461 const SharedQuadStateList& source_shared_quad_state_list =
1462 source_pass.shared_quad_state_list;
1463 // If quads have come in with SharedQuadState out of order, or when quads have
1464 // invalid SharedQuadState pointer, it should DCHECK.
1465 auto sqs_iter = source_shared_quad_state_list.cbegin();
1466 for (auto* quad : source_quad_list) {
1467 while (sqs_iter != source_shared_quad_state_list.cend() &&
1468 quad->shared_quad_state != *sqs_iter) {
1471 DCHECK(sqs_iter != source_shared_quad_state_list.cend());
// Pick, at most once per pass, the quad whose damage will be attributed to
// this pass in |surface_damage_rect_list_| for overlay processing.
1475 size_t overlay_damage_index = 0;
1476 const DrawQuad* quad_with_overlay_damage_index = nullptr;
1477 if (needs_surface_damage_rect_list_ &&
1478 resolved_pass.aggregation().will_draw) {
1479 // TODO(crbug.com/1323002): If there is one specific quad for this pass's
1480 // damage we should move the allocation of the damage index below to be
1481 // consistent with quad ordering.
1482 quad_with_overlay_damage_index =
1483 FindQuadWithOverlayDamage(source_pass, dest_pass, target_transform,
1484 surface, &overlay_damage_index);
1487 gfx::Transform pass_to_dest_root_target_transform =
1488 dest_pass->transform_to_root_target * target_transform;
1490 // Add render pass |output_rect| to |dest_root_target_clip_rect|.
1491 auto new_dest_root_target_clip_rect = CalculateClipRect(
1492 dest_root_target_clip_rect, resolved_pass.render_pass().output_rect,
1493 pass_to_dest_root_target_transform);
1495 UpdateNeedsRedraw(resolved_pass, dest_pass, new_dest_root_target_clip_rect);
1497 size_t quad_index = 0;
1498 auto& resolved_draw_quads = resolved_pass.draw_quads();
1500 uint32_t client_namespace_id = resolved_frame.GetClientNamespaceId();
// Main copy loop: one iteration per source quad, in draw order.
1502 for (auto* quad : source_quad_list) {
1503 const ResolvedQuadData& quad_data = resolved_draw_quads[quad_index++];
1505 // Both cannot be set at once (rounded corners are exception to this). If
1506 // this happens then a surface is being merged when it should not.
1507 DCHECK(!quad->shared_quad_state->mask_filter_info.HasGradientMask() ||
1508 !parent_mask_filter_info_ext.mask_filter_info.HasGradientMask());
// A quad's own mask filter info overrides the parent's, re-expressed in
// the destination pass's space via |target_transform|.
1510 MaskFilterInfoExt new_mask_filter_info_ext = parent_mask_filter_info_ext;
1511 if (!quad->shared_quad_state->mask_filter_info.IsEmpty()) {
1512 new_mask_filter_info_ext = MaskFilterInfoExt(
1513 quad->shared_quad_state->mask_filter_info,
1514 quad->shared_quad_state->is_fast_rounded_corner, target_transform);
1517 if (quad->material == DrawQuad::Material::kSharedElement) {
1518 // SharedElement quads should've been resolved before aggregation.
1520 } else if (const auto* surface_quad =
1521 quad->DynamicCast<SurfaceDrawQuad>()) {
1522 // HandleSurfaceQuad may add other shared quad state, so reset the
1524 last_copied_source_shared_quad_state = nullptr;
// Skip surface quads with no valid end of their surface range.
1526 if (!surface_quad->surface_range.end().is_valid())
1529 HandleSurfaceQuad(source_pass, surface_quad, client_namespace_id,
1530 parent_device_scale_factor, target_transform, clip_rect,
1531 new_dest_root_target_clip_rect, dest_pass,
1532 ignore_undamaged, &damage_rect_in_quad_space,
1533 &damage_rect_in_quad_space_valid,
1534 new_mask_filter_info_ext);
1536 // Here we output the optional quad's |per_quad_damage| to the
1537 // |surface_damage_rect_list_|. Any non per quad damage associated with
1538 // this |source_pass| will have been added to the
1539 // |surface_damage_rect_list_| before this phase.
1541 quad->shared_quad_state != last_copied_source_shared_quad_state;
1542 bool has_per_quad_damage =
1543 source_pass.has_per_quad_damage &&
1544 GetOptionalDamageRectFromQuad(quad).has_value() &&
1545 resolved_pass.aggregation().will_draw;
// Copy the SQS at most once per run of quads sharing it; per-quad damage
// forces a fresh SQS so the quad gets its own overlay damage index.
1547 if (needs_sqs || has_per_quad_damage) {
1548 SharedQuadState* dest_shared_quad_state = CopySharedQuadState(
1549 quad->shared_quad_state, client_namespace_id, target_transform,
1550 clip_rect, new_mask_filter_info_ext, dest_pass);
1552 if (has_per_quad_damage) {
1553 auto damage_rect_in_target_space =
1554 GetOptionalDamageRectFromQuad(quad);
1555 dest_shared_quad_state->overlay_damage_index =
1556 surface_damage_rect_list_->size();
1557 AddSurfaceDamageToDamageList(damage_rect_in_target_space.value(),
1559 new_dest_root_target_clip_rect,
1560 dest_pass->transform_to_root_target,
1561 /*resolved_frame=*/nullptr);
1562 } else if (quad == quad_with_overlay_damage_index) {
1563 dest_shared_quad_state->overlay_damage_index = overlay_damage_index;
1566 last_copied_source_shared_quad_state = quad->shared_quad_state;
// Recompute the damage rect in this new SQS's quad space for the
// |ignore_undamaged| culling check below.
1567 if (ignore_undamaged) {
1568 damage_rect_in_quad_space_valid = CalculateQuadSpaceDamageRect(
1569 dest_shared_quad_state->quad_to_target_transform,
1570 dest_pass->transform_to_root_target, root_damage_rect_,
1571 &damage_rect_in_quad_space);
// Drop quads that provably don't intersect the current damage.
1575 if (ignore_undamaged) {
1576 if (damage_rect_in_quad_space_valid &&
1577 !damage_rect_in_quad_space.Intersects(quad->visible_rect))
1581 DrawQuad* dest_quad = nullptr;
1582 if (const auto* pass_quad =
1583 quad->DynamicCast<CompositorRenderPassDrawQuad>()) {
// Remap the compositor pass id to its aggregated counterpart.
1584 CompositorRenderPassId original_pass_id = pass_quad->render_pass_id;
1585 AggregatedRenderPassId remapped_pass_id =
1586 resolved_frame.GetRenderPassDataById(original_pass_id)
1589 dest_quad = dest_pass->CopyFromAndAppendRenderPassDrawQuad(
1590 pass_quad, remapped_pass_id);
1592 if (needs_surface_damage_rect_list_ &&
1593 resolved_pass.aggregation().will_draw) {
1594 AddRenderPassFilterDamageToDamageList(
1595 resolved_frame, pass_quad, target_transform,
1596 new_dest_root_target_clip_rect,
1597 dest_pass->transform_to_root_target);
1599 } else if (const auto* texture_quad =
1600 quad->DynamicCast<TextureDrawQuad>()) {
1601 if (texture_quad->secure_output_only &&
1602 (!output_is_secure_ ||
1603 resolved_pass.aggregation().in_copy_request_pass)) {
1604 // If TextureDrawQuad requires secure output and the output is not
1605 // secure then replace it with solid black.
1606 auto* solid_color_quad =
1607 dest_pass->CreateAndAppendDrawQuad<SolidColorDrawQuad>();
1608 solid_color_quad->SetNew(dest_pass->shared_quad_state_list.back(),
1609 quad->rect, quad->visible_rect,
1610 SkColors::kBlack, false);
1612 dest_quad = dest_pass->CopyFromAndAppendDrawQuad(quad);
1615 dest_quad = dest_pass->CopyFromAndAppendDrawQuad(quad);
// Point the copied quad at the resources remapped for this frame.
1618 dest_quad->resources = quad_data.remapped_resources;
1624 void SurfaceAggregator::CopyPasses(ResolvedFrameData& resolved_frame) {
1625 Surface* surface = resolved_frame.surface();
1626 const CompositorFrame& frame = surface->GetActiveOrInterpolatedFrame();
1628 // The root surface is allowed to have copy output requests, so grab them
1629 // off its render passes. This map contains a set of CopyOutputRequests
1630 // keyed by each RenderPass id.
1631 Surface::CopyRequestsMap copy_requests;
1632 if (take_copy_requests_)
1633 surface->TakeCopyOutputRequests(©_requests);
1635 const auto& source_pass_list = frame.render_pass_list;
1636 if (!resolved_frame.is_valid())
1639 ++stats_->copied_surface_count;
1641 const gfx::Transform surface_transform =
1642 IsRootSurface(surface) ? root_surface_transform_ : gfx::Transform();
1644 auto& root_resolved_pass = resolved_frame.GetRootRenderPassData();
1645 gfx::Rect root_output_rect =
1646 cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
1647 surface_transform, root_resolved_pass.render_pass().output_rect);
1649 if (frame.metadata.delegated_ink_metadata) {
1650 DCHECK(surface->GetActiveFrameMetadata().delegated_ink_metadata ==
1651 frame.metadata.delegated_ink_metadata);
1652 // Copy delegated ink metadata from the compositor frame metadata. This
1653 // prevents the delegated ink trail from flickering if a compositor frame
1654 // is not generated due to a delayed main frame.
1655 TransformAndStoreDelegatedInkMetadata(
1656 source_pass_list.back()->transform_to_root_target * surface_transform,
1657 frame.metadata.delegated_ink_metadata.get());
1660 bool apply_surface_transform_to_root_pass = true;
1661 for (auto& resolved_pass : resolved_frame.GetResolvedPasses()) {
1662 const auto& source = resolved_pass.render_pass();
1664 size_t sqs_size = source.shared_quad_state_list.size();
1665 size_t dq_size = source.quad_list.size();
1666 auto copy_pass = std::make_unique<AggregatedRenderPass>(sqs_size, dq_size);
1668 MoveMatchingRequests(source.id, ©_requests, ©_pass->copy_requests);
1670 // We add an additional render pass for the transform if the root render
1671 // pass has any copy requests.
1672 apply_surface_transform_to_root_pass =
1673 resolved_pass.is_root() &&
1674 (copy_pass->copy_requests.empty() || surface_transform.IsIdentity());
1676 gfx::Rect output_rect = source.output_rect;
1677 gfx::Transform transform_to_root_target = source.transform_to_root_target;
1678 if (apply_surface_transform_to_root_pass) {
1679 // If we don't need an additional render pass to apply the surface
1680 // transform, adjust the root pass's rects to account for it.
1681 output_rect = root_output_rect;
1683 // For the non-root render passes, the transform to root target needs to
1684 // be adjusted to include the root surface transform. This is also true if
1685 // we will be adding another render pass for the surface transform, in
1686 // which this will no longer be the root.
1687 transform_to_root_target =
1688 surface_transform * source.transform_to_root_target;
1692 resolved_pass.remapped_id(), output_rect, output_rect,
1693 transform_to_root_target, source.filters, source.backdrop_filters,
1694 source.backdrop_filter_bounds, root_content_color_usage_,
1695 source.has_transparent_background,
1696 #if defined(TIZEN_VIDEO_HOLE)
1697 source.has_video_hole,
1699 source.cache_render_pass, resolved_pass.aggregation().has_damage,
1700 source.generate_mipmap);
1702 UpdatePersistentPassDataMergeState(resolved_pass, copy_pass.get(),
1703 /*is_merged_pass=*/false);
1705 if (needs_surface_damage_rect_list_ && resolved_pass.is_root()) {
1706 AddSurfaceDamageToDamageList(
1707 /*default_damage_rect=*/gfx::Rect(),
1708 /*parent_target_transform=*/surface_transform,
1709 /*dest_root_target_clip_rect=*/{},
1710 copy_pass->transform_to_root_target, &resolved_frame);
1713 CopyQuadsToPass(resolved_frame, resolved_pass, copy_pass.get(),
1714 frame.device_scale_factor(),
1715 apply_surface_transform_to_root_pass ? surface_transform
1717 {}, /*dest_root_target_clip_rect*/ root_output_rect,
1718 surface, MaskFilterInfoExt());
1720 SetRenderPassDamageRect(copy_pass.get(), resolved_pass);
1722 dest_pass_list_->push_back(std::move(copy_pass));
1725 if (!apply_surface_transform_to_root_pass)
1726 AddDisplayTransformPass();
1729 void SurfaceAggregator::SetRenderPassDamageRect(
1730 AggregatedRenderPass* copy_pass,
1731 ResolvedPassData& resolved_pass) {
1732 // If the render pass has copy requests, or should be cached, or has
1733 // moving-pixel filters, or in a moving-pixel surface, we should damage the
1734 // whole output rect so that we always drawn the full content. Otherwise, we
1735 // might have incompleted copy request, or cached patially drawn render
1738 if (!RenderPassNeedsFullDamage(resolved_pass)) {
1739 gfx::Transform inverse_transform;
1740 if (copy_pass->transform_to_root_target.GetInverse(&inverse_transform)) {
1741 gfx::Rect damage_rect_in_render_pass_space =
1742 cc::MathUtil::ProjectEnclosingClippedRect(inverse_transform,
1744 copy_pass->damage_rect.Intersect(damage_rect_in_render_pass_space);
1747 // For unembeded render passes, their damages were not added to the
1748 // root render pass. Add back the original damage from cc so it can be
1749 // skipped later when there is no internal damage.
1750 static const bool can_skip_render_pass = base::FeatureList::IsEnabled(
1751 features::kAllowUndamagedNonrootRenderPassToSkip);
1752 if (resolved_pass.IsUnembedded() && can_skip_render_pass) {
1753 copy_pass->damage_rect.Union(resolved_pass.aggregation().added_damage);
1758 void SurfaceAggregator::ProcessAddedAndRemovedSurfaces() {
1759 // Delete resolved frame data that wasn't used this aggregation. This releases
1760 // resources associated with those resolved frames.
1761 base::EraseIf(resolved_frames_, [](auto& entry) {
1762 return !entry.second.WasUsedInAggregation();
// Recursively walks |resolved_pass| (and every surface / render pass it
// embeds) before any quads are copied. It accumulates damage contributed by
// embedded content and propagates aggregation state down the embedding tree:
// will_draw, cached-render-pass membership and pixel-moving-filter
// membership. Returns the accumulated damage in this pass's coordinate
// space, which is also recorded in aggregation().added_damage.
// NOTE(review): several continuation lines and closing braces are elided in
// this excerpt; code lines below are reproduced verbatim.
gfx::Rect SurfaceAggregator::PrewalkRenderPass(
    ResolvedFrameData& resolved_frame,
    ResolvedPassData& resolved_pass,
    const gfx::Rect& damage_from_parent,
    const gfx::Transform& target_to_root_transform,
    const ResolvedPassData* parent_pass,
    PrewalkResult& result) {
  const CompositorRenderPass& render_pass = resolved_pass.render_pass();

  // Remember (for the whole aggregation) that at least one pass has a
  // pixel-moving backdrop filter.
  if (render_pass.backdrop_filters.HasFilterThatMovesPixels()) {
    has_pixel_moving_backdrop_filter_ = true;

  // Drawing is inherited from the embedder: if the embedding pass will draw,
  // so will this one.
  if (parent_pass && parent_pass->aggregation().will_draw)
    resolved_pass.aggregation().will_draw = true;

  // Populate state about cached render passes and pixel moving filters.
  // These attributes apply transitively to all child render passes embedded
  // by the CompositorRenderPass with the attribute.
  if (render_pass.cache_render_pass ||
      (parent_pass && parent_pass->aggregation().in_cached_render_pass)) {
    resolved_pass.aggregation().in_cached_render_pass = true;

  if (render_pass.filters.HasFilterThatMovesPixels() ||
      (parent_pass && parent_pass->aggregation().in_pixel_moving_filter_pass)) {
    resolved_pass.aggregation().in_pixel_moving_filter_pass = true;
    stats_->has_pixel_moving_filter = true;

  // Seed has_damage from the frame-level damage classification.
  const FrameDamageType damage_type = resolved_frame.GetFrameDamageType();
  if (damage_type == FrameDamageType::kFull) {
    resolved_pass.aggregation().has_damage = true;
  } else if (damage_type == FrameDamageType::kFrame &&
             render_pass.has_damage_from_contributing_content) {
    resolved_pass.aggregation().has_damage = true;

  // The damage on the root render pass of the surface comes from damage
  // accumulated from all quads in the surface, and needs to be expanded by
  // any pixel-moving backdrop filter in the render pass if intersecting.
  // Transform this damage into the local space of the render pass for this
  // purpose.
  // TODO(kylechar): If this render pass isn't reachable from the surfaces
  // root render pass then surface damage can't be transformed into this
  // render pass coordinate space. We should use the actual damage for the
  // render pass, which isn't included in the CompositorFrame right now.
  gfx::Rect surface_root_rp_damage = resolved_frame.GetSurfaceDamage();
  if (!surface_root_rp_damage.IsEmpty()) {
    gfx::Transform root_to_target_transform;
    if (target_to_root_transform.GetInverse(&root_to_target_transform)) {
      surface_root_rp_damage = cc::MathUtil::ProjectEnclosingClippedRect(
          root_to_target_transform, surface_root_rp_damage);

  gfx::Rect damage_rect;
  // Iterate through the quad list back-to-front and accumulate damage from
  // all quads (only SurfaceDrawQuads and RenderPassDrawQuads can have damage
  // at this point). |damage_rect| has damage from all quads below the current
  // iterated quad, and can be used to determine if there's any intersection
  // with the current quad when needed.
  for (const DrawQuad* quad : base::Reversed(resolved_pass.prewalk_quads())) {
    gfx::Rect quad_damage_rect;
    gfx::Rect quad_target_space_damage_rect;
    if (quad->material == DrawQuad::Material::kSurfaceContent) {
      const auto* surface_quad = SurfaceDrawQuad::MaterialCast(quad);
      ResolvedFrameData* child_resolved_frame =
          GetResolvedFrame(surface_quad->surface_range);

      // If the primary surface is not available then we assume the damage is
      // the full size of the SurfaceDrawQuad because we might need to
      // introduce fallback content in its place.
      if (!child_resolved_frame || child_resolved_frame->surface_id() !=
                                       surface_quad->surface_range.end()) {
        quad_damage_rect = quad->rect;

      if (child_resolved_frame) {
        float x_scale = SK_Scalar1;
        float y_scale = SK_Scalar1;
        if (surface_quad->stretch_content_to_fill_bounds) {
          const gfx::Size& child_size =
              child_resolved_frame->surface()->size_in_pixels();
          if (!child_size.IsEmpty()) {
            // NOTE(review): the continuation of the x_scale expression
            // (dividing by child_size.width()) is elided in this excerpt.
            x_scale = static_cast<float>(surface_quad->rect.width()) /
            y_scale = static_cast<float>(surface_quad->rect.height()) /
                      child_size.height();
          // If not stretching to fit bounds then scale to adjust to device
          // scale factor differences between child and parent surface. This
          // scale factor is later applied to quads in the aggregated frame.
          // NOTE(review): the assignment target (x_scale = y_scale =) is
          // elided in this excerpt.
              resolved_frame.surface()->device_scale_factor() /
              child_resolved_frame->surface()->device_scale_factor();

        // If the surface quad is to be merged potentially, the current
        // effective accumulated damage needs to be taken into account. This
        // includes the damage from quads under the surface quad, i.e.
        // |damage_rect|, |surface_root_rp_damage|, which can contain damage
        // contributed by quads under the surface quad in the previous stage
        // (cc), and |damage_from_parent|. The damage is first transformed
        // into the local space of the surface quad and then passed to the
        // embedding surface. The condition for deciding if the surface quad
        // will merge is loose here, so for those quads passed this condition
        // but eventually don't merge, there is over-contribution of the
        // damage passed from parent, but this shouldn't affect correctness.
        gfx::Rect accumulated_damage_in_child_space;

        if (CanPotentiallyMergePass(*surface_quad)) {
          accumulated_damage_in_child_space.Union(damage_rect);
          accumulated_damage_in_child_space.Union(damage_from_parent);
          accumulated_damage_in_child_space.Union(surface_root_rp_damage);
          if (!accumulated_damage_in_child_space.IsEmpty()) {
            gfx::Transform inverse =
                quad->shared_quad_state->quad_to_target_transform
                    .GetCheckedInverse();
            // Undo the content-fill scaling before mapping into child space.
            inverse.PostScale(SK_Scalar1 / x_scale, SK_Scalar1 / y_scale);
            accumulated_damage_in_child_space =
                cc::MathUtil::ProjectEnclosingClippedRect(
                    inverse, accumulated_damage_in_child_space);

        // Recurse into the embedded surface and fold its damage (scaled back
        // into this pass's space) into this quad's damage.
        gfx::Rect child_rect =
            PrewalkSurface(*child_resolved_frame, &resolved_pass,
                           accumulated_damage_in_child_space, result);
        child_rect = gfx::ScaleToEnclosingRect(child_rect, x_scale, y_scale);
        quad_damage_rect.Union(child_rect);

      // Only check for root render pass on the root surface.
      if (parent_pass == nullptr && resolved_pass.is_root() &&
          !result.page_fullscreen_mode) {
        gfx::Rect surface_quad_on_target_space = ClippedQuadRectangle(quad);
        // Often time the surface_quad_on_target_space is not exactly the same
        // as the output_rect after the math operations, although they are
        // meant to be the same. Set the delta tolerance to 8 pixels.
        if (surface_quad_on_target_space.ApproximatelyEqual(
                render_pass.output_rect, /*tolerance=*/8)) {
          result.page_fullscreen_mode = true;
    } else if (auto* render_pass_quad =
                   quad->DynamicCast<CompositorRenderPassDrawQuad>()) {
      CompositorRenderPassId child_pass_id = render_pass_quad->render_pass_id;

      ResolvedPassData& child_resolved_pass =
          resolved_frame.GetRenderPassDataById(child_pass_id);
      const CompositorRenderPass& child_render_pass =
          child_resolved_pass.render_pass();

      gfx::Rect rect_in_target_space = cc::MathUtil::MapEnclosingClippedRect(
          quad->shared_quad_state->quad_to_target_transform, quad->rect);

      // |damage_rect|, |damage_from_parent| and |surface_root_rp_damage|
      // either are or can possibly contain damage from under the quad, so if
      // they intersect the quad render pass output rect, we have to
      // invalidate the |intersects_damage_under| flag. Note the intersection
      // test can be done against backdrop filter bounds as an improvement.
      bool intersects_current_damage =
          rect_in_target_space.Intersects(damage_rect);
      bool intersects_damage_from_parent =
          rect_in_target_space.Intersects(damage_from_parent);
      // The |intersects_damage_under| flag hints if the current quad
      // intersects any damage from any quads below in the same surface. If
      // the flag is false, it means the intersecting damage is from quads
      // above it or from the parent.
      bool intersects_damage_from_surface =
          rect_in_target_space.Intersects(surface_root_rp_damage);
      if (intersects_current_damage || intersects_damage_from_parent ||
          intersects_damage_from_surface) {
        render_pass_quad->intersects_damage_under = true;

        if (child_render_pass.backdrop_filters.HasFilterThatMovesPixels()) {
          // The damage from under the quad intersects quad render pass output
          // rect and it has to be expanded because of the pixel-moving
          // backdrop filters. We expand the |damage_rect| to include quad
          // render pass output rect (which can be optimized to be backdrop
          // filter bounds). |damage_from_parent| and |surface_root_rp_damage|
          // only have to be included when they also have intersection with
          // the quad.
          damage_rect.Union(rect_in_target_space);
          if (intersects_damage_from_parent) {
            damage_rect.Union(damage_from_parent);
          if (intersects_damage_from_surface) {
            damage_rect.Union(surface_root_rp_damage);

      // For the pixel-moving backdrop filters, all effects are limited to the
      // size of the RenderPassDrawQuad rect. Therefore when we find the
      // damage under the quad intersects quad render pass output rect, we
      // extend the damage rect to include the rpdq->rect.
      // TODO(crbug/1379125): Work out how to correctly compute damage when
      // offset backdrop filters may be involved.

      // For the pixel-moving foreground filters, all effects can be expanded
      // outside the RenderPassDrawQuad rect based on filter pixel movement.
      // Therefore, we have to check if the expanded rpdq->rect intersects the
      // damage under it. Then we extend the damage rect to include the
      // expanded rect.

      // Expand the damage to cover entire |output_rect| if the |render_pass|
      // has pixel-moving foreground filter.
      if (child_render_pass.filters.HasFilterThatMovesPixels()) {
        gfx::Rect expanded_rect_in_target_space =
            GetExpandedRectWithPixelMovingForegroundFilter(
                *render_pass_quad, child_render_pass.filters);

        if (expanded_rect_in_target_space.Intersects(damage_rect) ||
            expanded_rect_in_target_space.Intersects(damage_from_parent) ||
            expanded_rect_in_target_space.Intersects(surface_root_rp_damage)) {
          damage_rect.Union(expanded_rect_in_target_space);

      // Record the embedding edge, then recurse into the child pass.
      resolved_pass.aggregation().embedded_passes.insert(&child_resolved_pass);

      const gfx::Transform child_to_root_transform =
          target_to_root_transform *
          quad->shared_quad_state->quad_to_target_transform;

      PrewalkRenderPass(resolved_frame, child_resolved_pass, gfx::Rect(),
                        child_to_root_transform, &resolved_pass, result);

    // If this is the next frame in sequence from last aggregation then per
    // quad damage_rects are valid so add them here. If not, either this is
    // the same frame as last aggregation and there is no damage OR there is
    // already full damage for the surface.
    if (damage_type == FrameDamageType::kFrame) {
      auto& per_quad_damage_rect = GetOptionalDamageRectFromQuad(quad);
      DCHECK(per_quad_damage_rect.has_value());
      // The DrawQuad `per_quad_damage_rect` is already in the render pass
      // coordinate space instead of quad rect coordinate space.
      quad_target_space_damage_rect = per_quad_damage_rect.value();

    // Clip the quad damage to the quad visible before converting back to
    // render pass coordinate space. Expanded damage outside the quad rect for
    // filters are added to |damage_rect| directly so this only clips damage
    // from drawing the quad itself.
    quad_damage_rect.Intersect(quad->visible_rect);

    if (!quad_damage_rect.IsEmpty()) {
      // Convert the quad damage rect into its target space and clip it if
      // needed. Ignore tiny errors to avoid artificially inflating the
      // damage due to floating point math.
      constexpr float kEpsilon = 0.001f;
      quad_target_space_damage_rect =
          cc::MathUtil::MapEnclosingClippedRectIgnoringError(
              quad->shared_quad_state->quad_to_target_transform,
              quad_damage_rect, kEpsilon);

    if (!quad_target_space_damage_rect.IsEmpty()) {
      if (quad->shared_quad_state->clip_rect) {
        quad_target_space_damage_rect.Intersect(
            *quad->shared_quad_state->clip_rect);
      damage_rect.Union(quad_target_space_damage_rect);

  if (!damage_rect.IsEmpty()) {
    // There is extra damage for this render pass. This is damage that the
    // client that submitted this render pass didn't know about and isn't
    // included in the surface damage or
    // `has_damage_from_contributing_content`.
    resolved_pass.aggregation().has_damage = true;

    if (render_pass.filters.HasFilterThatMovesPixels()) {
      // Expand the damage to cover entire |output_rect| if the |render_pass|
      // has pixel-moving foreground filter.
      damage_rect.Union(render_pass.output_rect);

    // The added damage from quads in the render pass is transformed back into
    // the render pass coordinate space without clipping, so it can extend
    // beyond the edge of the current render pass. Coordinates outside the
    // output_rect are invalid in this render passes coordinate space but they
    // may be valid coordinates in the embedder coordinate space, causing
    // unnecessary damage expansion.
    damage_rect.Intersect(render_pass.output_rect);

    resolved_pass.aggregation().added_damage.Union(damage_rect);
2059 void SurfaceAggregator::ProcessResolvedFrame(
2060 ResolvedFrameData& resolved_frame) {
2061 Surface* surface = resolved_frame.surface();
2062 const CompositorFrame& compositor_frame =
2063 surface->GetActiveOrInterpolatedFrame();
2065 // Ref the resources in the surface, and let the provider know we've received
2066 // new resources from the compositor frame.
2067 if (surface->client())
2068 surface->client()->RefResources(compositor_frame.resource_list);
2070 resolved_frame.UpdateForActiveFrame(render_pass_id_generator_);
2073 bool SurfaceAggregator::CheckFrameSinksChanged(const Surface* surface) {
2074 contained_surfaces_.insert(surface->surface_id());
2075 LocalSurfaceId& local_surface_id =
2076 contained_frame_sinks_[surface->surface_id().frame_sink_id()];
2077 bool frame_sinks_changed = (!previous_contained_frame_sinks_.contains(
2078 surface->surface_id().frame_sink_id()));
2080 std::max(surface->surface_id().local_surface_id(), local_surface_id);
2081 return frame_sinks_changed;
// Prewalks |resolved_frame| before any quads are copied: computes the
// surface's aggregated damage (recursing via PrewalkRenderPass), records
// damage ranges for referenced surfaces, prewalks referenced-but-undrawn
// surfaces, and flags passes that feed copy requests. Returns the surface
// damage; for the root surface this is reported in pre-display-transform
// space to the surface itself.
// NOTE(review): some early returns, continuation lines, `#endif` and closing
// braces are elided in this excerpt; code lines below are reproduced
// verbatim.
gfx::Rect SurfaceAggregator::PrewalkSurface(ResolvedFrameData& resolved_frame,
                                            ResolvedPassData* parent_pass,
                                            const gfx::Rect& damage_from_parent,
                                            PrewalkResult& result) {
  Surface* surface = resolved_frame.surface();
  DCHECK(surface->HasActiveFrame());

  // Cycle guard: this surface is already on the current aggregation path.
  if (referenced_surfaces_.count(surface->surface_id()))

  result.frame_sinks_changed |= CheckFrameSinksChanged(surface);

  if (!resolved_frame.is_valid())

  DebugLogSurface(surface, resolved_frame.WillDraw());
  ++stats_->prewalked_surface_count;

  // Link this surface's root pass into the embedder's embedding tree.
  auto& root_resolved_pass = resolved_frame.GetRootRenderPassData();
    parent_pass->aggregation().embedded_passes.insert(&root_resolved_pass);

  gfx::Rect damage_rect = resolved_frame.GetSurfaceDamage();

  // Avoid infinite recursion by adding current surface to
  // |referenced_surfaces_|.
  referenced_surfaces_.insert(surface->surface_id());

  for (auto& resolved_pass : resolved_frame.GetResolvedPasses()) {
    // Prewalk any render passes that aren't reachable from the root pass. The
    // damage produced isn't correct since there is no transform between
    // damage in the root render passes coordinate space and the unembedded
    // render pass, but other attributes related to the embedding hierarchy
    // are still important to propagate.
    if (resolved_pass.IsUnembedded()) {
      stats_->has_unembedded_pass = true;
      resolved_pass.aggregation().added_damage =
          PrewalkRenderPass(resolved_frame, resolved_pass,
                            /*damage_from_parent=*/gfx::Rect(),
                            /*target_to_root_transform=*/gfx::Transform(),
                            /*parent_pass=*/nullptr, result);

  // Main walk from the root pass; union its damage into the surface damage.
  damage_rect.Union(PrewalkRenderPass(resolved_frame, root_resolved_pass,
                                      damage_from_parent, gfx::Transform(),
                                      parent_pass, result));

  if (!damage_rect.IsEmpty()) {
    auto damage_rect_surface_space = damage_rect;
    if (IsRootSurface(surface)) {
      // The damage reported to the surface is in pre-display transform space
      // since it is used by clients which are not aware of the display
      // transform.
      damage_rect = cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
          root_surface_transform_, damage_rect);
      gfx::Transform inverse = root_surface_transform_.GetCheckedInverse();
      damage_rect_surface_space =
          cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(inverse,

    // The following call can cause one or more copy requests to be added to
    // the Surface. Therefore, no code before this point should have assumed
    // anything about the presence or absence of copy requests after this
    // point.
    surface->NotifyAggregatedDamage(damage_rect_surface_space,
                                    expected_display_time_);

  // If any CopyOutputRequests were made at FrameSink level, make sure we grab
  // them too.
  surface->TakeCopyOutputRequestsFromClient();

  if (root_resolved_pass.aggregation().will_draw)
    surface->OnWillBeDrawn();

  const CompositorFrame& frame = surface->GetActiveOrInterpolatedFrame();

#if BUILDFLAG(IS_EFL)
  if (frame.metadata.can_skip_flush)
    result.can_skip_flush = true;

  // Record which frame sinks can damage this surface so future frames can be
  // matched against the referenced ranges.
  for (const SurfaceRange& surface_range : frame.metadata.referenced_surfaces) {
    damage_ranges_[surface_range.end().frame_sink_id()].push_back(
    if (surface_range.HasDifferentFrameSinkIds()) {
      damage_ranges_[surface_range.start()->frame_sink_id()].push_back(

  // Prewalk referenced surfaces that no drawn surface embeds, so their copy
  // requests and aggregation state are still handled.
  for (const SurfaceId& surface_id : surface->active_referenced_surfaces()) {
    if (!contained_surfaces_.count(surface_id)) {
      result.undrawn_surfaces.insert(surface_id);
      ResolvedFrameData* undrawn_surface = GetResolvedFrame(surface_id);
      if (undrawn_surface) {
        PrewalkSurface(*undrawn_surface, /*parent_pass=*/nullptr, gfx::Rect(),

  for (auto& resolved_pass : resolved_frame.GetResolvedPasses()) {
    auto& render_pass = resolved_pass.render_pass();

    // Checking for copy requests need to be done after the prewalk because
    // copy requests can get added after damage is computed.
    if (!render_pass.copy_requests.empty()) {
      has_copy_requests_ = true;
      MarkAndPropagateCopyRequestPasses(resolved_pass);

  referenced_surfaces_.erase(surface->surface_id());
  result.content_color_usage =
      std::max(result.content_color_usage, frame.metadata.content_color_usage);
// Copies passes for surfaces that prewalk found referenced-but-undrawn, so
// copy requests on them (or on surfaces they transitively reference) still
// execute even though no SurfaceDrawQuad embeds them.
// NOTE(review): a `continue;` and several closing braces are elided in this
// excerpt; code lines below are reproduced verbatim.
void SurfaceAggregator::CopyUndrawnSurfaces(PrewalkResult* prewalk_result) {
  // undrawn_surfaces are Surfaces that were identified by prewalk as being
  // referenced by a drawn Surface, but aren't contained in a SurfaceDrawQuad.
  // They need to be iterated over to ensure that any copy requests on them
  // (or on Surfaces they reference) are executed.
  std::vector<SurfaceId> surfaces_to_copy(
      prewalk_result->undrawn_surfaces.begin(),
      prewalk_result->undrawn_surfaces.end());
  DCHECK(referenced_surfaces_.empty());

  // Index-based loop: |surfaces_to_copy| grows while iterating, so iterators
  // would be invalidated.
  for (size_t i = 0; i < surfaces_to_copy.size(); i++) {
    SurfaceId surface_id = surfaces_to_copy[i];
    ResolvedFrameData* resolved_frame = GetResolvedFrame(surface_id);
    if (!resolved_frame)

    Surface* surface = resolved_frame->surface();
    if (!surface->HasCopyOutputRequests()) {
      // Children are not necessarily included in undrawn_surfaces (because
      // they weren't referenced directly from a drawn surface), but may have
      // copy requests, so make sure to check them as well.
      for (const SurfaceId& child_id : surface->active_referenced_surfaces()) {
        // Don't iterate over the child Surface if it was already listed as a
        // child of a different Surface, or in the case where there's infinite
        // recursion.
        if (!prewalk_result->undrawn_surfaces.count(child_id)) {
          surfaces_to_copy.push_back(child_id);
          prewalk_result->undrawn_surfaces.insert(child_id);

    prewalk_result->undrawn_surfaces.erase(surface_id);
    // Guard against re-entry while copying this surface's passes.
    referenced_surfaces_.insert(surface_id);
    CopyPasses(*resolved_frame);
    referenced_surfaces_.erase(surface_id);
2245 void SurfaceAggregator::MarkAndPropagateCopyRequestPasses(
2246 ResolvedPassData& resolved_pass) {
2247 if (resolved_pass.aggregation().in_copy_request_pass)
2250 resolved_pass.aggregation().in_copy_request_pass = true;
2251 for (auto* child_pass : resolved_pass.aggregation().embedded_passes) {
2252 MarkAndPropagateCopyRequestPasses(*child_pass);
2256 AggregatedFrame SurfaceAggregator::Aggregate(
2257 const SurfaceId& surface_id,
2258 base::TimeTicks expected_display_time,
2259 gfx::OverlayTransform display_transform,
2260 const gfx::Rect& target_damage,
2261 int64_t display_trace_id) {
2262 DCHECK(!expected_display_time.is_null());
2263 DCHECK(contained_surfaces_.empty());
2265 DCHECK(!is_inside_aggregate_);
2266 is_inside_aggregate_ = true;
2268 root_surface_id_ = surface_id;
2270 // Start recording new stats for this aggregation.
2273 base::ElapsedTimer prewalk_timer;
2274 ResolvedFrameData* resolved_frame = GetResolvedFrame(surface_id);
2276 if (!resolved_frame || !resolved_frame->is_valid()) {
2277 ResetAfterAggregate();
2281 Surface* surface = resolved_frame->surface();
2282 CheckFrameSinksChanged(surface);
2284 display_trace_id_ = display_trace_id;
2285 expected_display_time_ = expected_display_time;
2287 const CompositorFrame& root_surface_frame =
2288 surface->GetActiveOrInterpolatedFrame();
2290 "viz,benchmark,graphics.pipeline", "Graphics.Pipeline",
2291 perfetto::TerminatingFlow::Global(
2292 root_surface_frame.metadata.begin_frame_ack.trace_id),
2293 perfetto::Flow::Global(display_trace_id_),
2294 [trace_id = display_trace_id_](perfetto::EventContext ctx) {
2295 auto* event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
2296 auto* data = event->set_chrome_graphics_pipeline();
2297 data->set_step(perfetto::protos::pbzero::ChromeGraphicsPipeline::
2298 StepName::STEP_SURFACE_AGGREGATION);
2299 data->set_display_trace_id(trace_id);
2302 AggregatedFrame frame;
2303 frame.top_controls_visible_height =
2304 root_surface_frame.metadata.top_controls_visible_height;
2306 dest_pass_list_ = &frame.render_pass_list;
2307 surface_damage_rect_list_ = &frame.surface_damage_rect_list_;
2309 auto& root_render_pass = root_surface_frame.render_pass_list.back();
2311 // The root render pass on the root surface can not have backdrop filters.
2312 DCHECK(!root_render_pass->backdrop_filters.HasFilterThatMovesPixels());
2314 const gfx::Size viewport_bounds = root_render_pass->output_rect.size();
2315 root_surface_transform_ = gfx::OverlayTransformToTransform(
2316 display_transform, gfx::SizeF(viewport_bounds));
2318 // Reset state that couldn't be reset in ResetAfterAggregate().
2319 damage_ranges_.clear();
2321 DCHECK(referenced_surfaces_.empty());
2323 // The root surface root render pass is the start of the embedding tree.
2324 resolved_frame->GetRootRenderPassData().aggregation().will_draw = true;
2326 PrewalkResult prewalk_result;
2327 gfx::Rect prewalk_damage_rect =
2328 PrewalkSurface(*resolved_frame,
2329 /*parent_pass=*/nullptr,
2330 /*damage_from_parent=*/gfx::Rect(), prewalk_result);
2331 stats_->prewalk_time = prewalk_timer.Elapsed();
2333 root_damage_rect_ = prewalk_damage_rect;
2334 // |root_damage_rect_| is used to restrict aggregating quads only if they
2335 // intersect this area.
2336 root_damage_rect_.Union(target_damage);
2338 // Changing color usage will cause the renderer to reshape the output surface,
2339 // therefore the renderer might expand the damage to the whole frame. The
2340 // following makes sure SA will produce all the quads to cover the full frame.
2341 bool color_usage_changed =
2342 root_content_color_usage_ != prewalk_result.content_color_usage;
2343 if (color_usage_changed) {
2344 root_damage_rect_ = cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
2345 root_surface_transform_,
2346 gfx::Rect(root_surface_frame.size_in_pixels()));
2347 root_content_color_usage_ = prewalk_result.content_color_usage;
2350 if (prewalk_result.frame_sinks_changed)
2351 manager_->AggregatedFrameSinksChanged();
2353 frame.has_copy_requests = has_copy_requests_ && take_copy_requests_;
2354 frame.content_color_usage = prewalk_result.content_color_usage;
2355 frame.page_fullscreen_mode = prewalk_result.page_fullscreen_mode;
2356 #if BUILDFLAG(IS_EFL)
2357 frame.can_skip_flush = prewalk_result.can_skip_flush;
2360 base::ElapsedTimer copy_timer;
2361 CopyUndrawnSurfaces(&prewalk_result);
2362 referenced_surfaces_.insert(surface_id);
2363 CopyPasses(*resolved_frame);
2364 referenced_surfaces_.erase(surface_id);
2365 DCHECK(referenced_surfaces_.empty());
2366 stats_->copy_time = copy_timer.Elapsed();
2368 RecordStatHistograms();
2370 if (dest_pass_list_->empty()) {
2371 ResetAfterAggregate();
2375 // The root render pass damage might have been expanded by target_damage (the
2376 // area that might need to be recomposited on the target surface). We restrict
2377 // the damage_rect of the root render pass to the one caused by the source
2378 // surfaces, except when drawing delegated ink trails.
2379 // The damage on the root render pass should not include the expanded area
2380 // since Renderer and OverlayProcessor expect the non expanded damage. The
2381 // only exception is when delegated ink trails are being drawn, in which case
2382 // the root render pass needs to contain the expanded area, as |target_damage|
2383 // also reflects the delegated ink trail damage rect.
2384 auto* last_pass = dest_pass_list_->back().get();
2386 if (!color_usage_changed && !last_frame_had_delegated_ink_ &&
2387 !RenderPassNeedsFullDamage(resolved_frame->GetRootRenderPassData())) {
2388 last_pass->damage_rect.Intersect(prewalk_damage_rect);
2391 AddColorConversionPass();
2392 AddRootReadbackPass();
2394 ProcessAddedAndRemovedSurfaces();
2395 contained_surfaces_.swap(previous_contained_surfaces_);
2396 contained_frame_sinks_.swap(previous_contained_frame_sinks_);
2398 ResetAfterAggregate();
2400 for (auto& contained_surface_id : previous_contained_surfaces_) {
2401 surface = manager_->GetSurfaceForId(contained_surface_id);
2403 surface->allocation_group()->TakeAggregatedLatencyInfoUpTo(
2404 surface, &frame.latency_info);
2406 if (!ui::LatencyInfo::Verify(frame.latency_info,
2407 "SurfaceAggregator::Aggregate")) {
2412 if (delegated_ink_metadata_) {
2413 frame.delegated_ink_metadata = std::move(delegated_ink_metadata_);
2414 last_frame_had_delegated_ink_ = true;
2416 last_frame_had_delegated_ink_ = false;
2419 if (frame_annotator_)
2420 frame_annotator_->AnnotateAggregatedFrame(&frame);
// Emits the per-aggregation UMA metrics accumulated in |stats_| during
// Aggregate(): how many surfaces were prewalked/copied, how long each phase
// (prewalk, copy, resource declaration) took, and per-frame boolean
// properties (copy requests, pixel-moving filters, unembedded passes).
2425 void SurfaceAggregator::RecordStatHistograms() {
// Surface counts are capped at 100 by the COUNTS_100 macro.
2426 UMA_HISTOGRAM_COUNTS_100(
2427 "Compositing.SurfaceAggregator.PrewalkedSurfaceCount",
2428 stats_->prewalked_surface_count);
2429 UMA_HISTOGRAM_COUNTS_100("Compositing.SurfaceAggregator.CopiedSurfaceCount",
2430 stats_->copied_surface_count);
// Phase timings use microsecond-resolution custom buckets shared via the
// kHistogram* constants so the three timing histograms are comparable.
2432 UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
2433 "Compositing.SurfaceAggregator.PrewalkUs", stats_->prewalk_time,
2434 kHistogramMinTime, kHistogramMaxTime, kHistogramTimeBuckets);
2435 UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
2436 "Compositing.SurfaceAggregator.CopyUs", stats_->copy_time,
2437 kHistogramMinTime, kHistogramMaxTime, kHistogramTimeBuckets);
2438 UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
2439 "Compositing.SurfaceAggregator.DeclareResourcesUs",
2440 stats_->declare_resources_time, kHistogramMinTime, kHistogramMaxTime,
2441 kHistogramTimeBuckets);
// Boolean per-frame properties; |has_copy_requests_| and
// |has_pixel_moving_backdrop_filter_| live on the aggregator itself (they
// are reset in ResetAfterAggregate()), the rest come from |stats_|.
2443 UMA_HISTOGRAM_BOOLEAN("Compositing.SurfaceAggregator.HasCopyRequestsPerFrame",
2444 has_copy_requests_);
2445 UMA_HISTOGRAM_BOOLEAN(
2446 "Compositing.SurfaceAggregator.HasPixelMovingFiltersPerFrame",
2447 stats_->has_pixel_moving_filter);
2448 UMA_HISTOGRAM_BOOLEAN(
2449 "Compositing.SurfaceAggregator.HasPixelMovingBackdropFiltersPerFrame",
2450 has_pixel_moving_backdrop_filter_);
2451 UMA_HISTOGRAM_BOOLEAN(
2452 "Compositing.SurfaceAggregator.HasUnembeddedRenderPassesPerFrame",
2453 stats_->has_unembedded_pass);
// Clears all per-aggregation scratch state so the next Aggregate() call
// starts fresh. Only valid while an aggregation is in flight (see the
// DCHECK); Aggregate() calls this both on its early-out path (empty dest
// pass list) and at the end of a successful aggregation.
2458 void SurfaceAggregator::ResetAfterAggregate() {
2459 DCHECK(is_inside_aggregate_);
2461 is_inside_aggregate_ = false;
// |dest_pass_list_| and |surface_damage_rect_list_| point at caller-owned
// output structures; drop the pointers rather than the data.
2462 dest_pass_list_ = nullptr;
2463 surface_damage_rect_list_ = nullptr;
2464 current_zero_damage_rect_is_not_recorded_ = false;
2465 expected_display_time_ = base::TimeTicks();
// -1 marks "no active display trace" for the next aggregation.
2466 display_trace_id_ = -1;
2467 has_pixel_moving_backdrop_filter_ = false;
2468 has_copy_requests_ = false;
2469 resolved_surface_ranges_.clear();
2470 contained_surfaces_.clear();
2471 contained_frame_sinks_.clear();
2473 // Reset resolved frame data from this aggregation.
2474 for (auto& [surface_id, resolved_frame] : resolved_frames_)
2475 resolved_frame.ResetAfterAggregation();
// Marks the resolved frame for |surface_id| so its entire output is treated
// as damaged on the next aggregation. No-op if the aggregator has no
// ResolvedFrameData for that surface (i.e. it was never aggregated or has
// since been evicted from |resolved_frames_|).
2478 void SurfaceAggregator::SetFullDamageForSurface(const SurfaceId& surface_id) {
2479 auto iter = resolved_frames_.find(surface_id);
2480 if (iter != resolved_frames_.end())
2481 iter->second.SetFullDamageForNextAggregation();
// Stores the display's color space configuration for use by subsequent
// aggregations (e.g. when deciding whether a color conversion pass is
// needed). Takes effect on the next Aggregate() call.
2484 void SurfaceAggregator::SetDisplayColorSpaces(
2485 const gfx::DisplayColorSpaces& display_color_spaces) {
2486 display_color_spaces_ = display_color_spaces;
// Sets the maximum allowed render target dimension used during aggregation.
// |max_size| must be non-negative; presumably 0 means "no limit" — TODO
// confirm against the header / call sites.
2489 void SurfaceAggregator::SetMaxRenderTargetSize(int max_size) {
2490 DCHECK_GE(max_size, 0);
2491 max_render_target_size_ = max_size;
// Handles a damage notification for |surface_id| and reports whether that
// damage can affect the display output.
//
// If the aggregator holds ResolvedFrameData for the surface, its active
// frame is inspected: an empty resource list triggers a forced release of
// the resources held for that surface (see comment below). Otherwise the
// surface is matched against |damage_ranges_| — the SurfaceRanges recorded
// per frame sink during the last aggregation — to decide whether any
// embedding surface would be damaged by it.
2494 bool SurfaceAggregator::NotifySurfaceDamageAndCheckForDisplayDamage(
2495 const SurfaceId& surface_id) {
2496 auto iter = resolved_frames_.find(surface_id);
2497 if (iter != resolved_frames_.end()) {
2498 auto& resolved_frame = iter->second;
// A surface with ResolvedFrameData is expected to have an active frame.
2499 DCHECK(resolved_frame.surface()->HasActiveFrame());
2500 if (resolved_frame.surface()
2501 ->GetActiveOrInterpolatedFrame()
2502 .resource_list.empty()) {
2503 // When a client submits a CompositorFrame without resources it's
2504 // typically done to force return of existing resources to the client.
2505 resolved_frame.ForceReleaseResource();
// No damage ranges recorded for this frame sink: the surface is not
// referenced by anything we aggregated.
2510 auto it = damage_ranges_.find(surface_id.frame_sink_id());
2511 if (it == damage_ranges_.end())
// Damage counts for the display if any recorded range includes this id.
2514 for (const SurfaceRange& surface_range : it->second) {
2515 if (surface_range.IsInRangeInclusive(surface_id))
// Returns true if a FrameAnnotator has been installed via SetFrameAnnotator()
// and not yet destroyed.
2522 bool SurfaceAggregator::HasFrameAnnotator() const {
2523 return !!frame_annotator_;
// Installs the annotator invoked on each aggregated frame at the end of
// Aggregate() (see AnnotateAggregatedFrame call there). At most one
// annotator may be installed at a time; callers must DestroyFrameAnnotator()
// before installing a replacement.
2526 void SurfaceAggregator::SetFrameAnnotator(
2527 std::unique_ptr<FrameAnnotator> frame_annotator) {
2528 DCHECK(!frame_annotator_);
2529 frame_annotator_ = std::move(frame_annotator);
// Removes and destroys the installed FrameAnnotator. An annotator must be
// installed when this is called (enforced by the DCHECK).
2532 void SurfaceAggregator::DestroyFrameAnnotator() {
2533 DCHECK(frame_annotator_);
2534 frame_annotator_.reset();
// Returns true if |surface| is the root surface of the display, i.e. the
// surface whose id matches the |root_surface_id_| this aggregator was
// aggregating for.
2537 bool SurfaceAggregator::IsRootSurface(const Surface* surface) const {
2538 return surface->surface_id() == root_surface_id_;
2541 // Transform the point and presentation area of the metadata to be in the root
2542 // target space. They need to be in the root target space because they will
2543 // eventually be drawn directly onto the buffer just before being swapped onto
2544 // the screen, so root target space is required so that they are positioned
2545 // correctly. After transforming, they are stored in the
2546 // |delegated_ink_metadata_| member in order to be placed on the final
2547 // aggregated frame, after which the member is then cleared.
// See the comment block above for the overall contract: transforms
// |metadata|'s point and presentation area by
// |parent_quad_to_root_target_transform| and stores the result in
// |delegated_ink_metadata_| for the final aggregated frame.
2548 void SurfaceAggregator::TransformAndStoreDelegatedInkMetadata(
2549 const gfx::Transform& parent_quad_to_root_target_transform,
2550 const gfx::DelegatedInkMetadata* metadata) {
2551 if (delegated_ink_metadata_) {
2552 // This member could already be populated in two scenarios:
2553 // 1. The delegated ink metadata was committed to a frame's metadata that
2554 // wasn't ultimately used to produce a frame, but is now being used.
2555 // 2. There are two or more ink strokes requesting a delegated ink trail
2557 // In both cases, we want to default to using a "last write wins" strategy
2558 // to determine the metadata to put on the final aggregated frame. This
2559 // avoids potential issues of using stale ink metadata in the first scenario
2560 // by always using the newest one. For the second scenario, it would be a
2561 // very niche use case to have more than one at a time, so the explainer
2562 // specifies using last write wins to decide.
// Compare timestamps to implement last-write-wins: older incoming metadata
// must not replace the already-stored (newer) metadata.
2563 base::TimeTicks stored_time = delegated_ink_metadata_->timestamp();
2564 base::TimeTicks new_time = metadata->timestamp();
2565 if (new_time < stored_time)
// Map the ink point and presentation area into root target space; all other
// metadata fields are carried over unchanged.
2570 parent_quad_to_root_target_transform.MapPoint(metadata->point());
2571 gfx::RectF area = parent_quad_to_root_target_transform.MapRect(
2572 metadata->presentation_area());
2573 delegated_ink_metadata_ = std::make_unique<gfx::DelegatedInkMetadata>(
2574 point, metadata->diameter(), metadata->color(), metadata->timestamp(),
2575 area, metadata->frame_time(), metadata->is_hovering());
// Record both the original and transformed metadata for debugging.
2577 TRACE_EVENT_INSTANT2(
2578 "viz", "SurfaceAggregator::TransformAndStoreDelegatedInkMetadata",
2579 TRACE_EVENT_SCOPE_THREAD, "original metadata", metadata->ToString(),
2580 "transformed metadata", delegated_ink_metadata_->ToString());
2583 void SurfaceAggregator::DebugLogSurface(const Surface* surface,
2585 DBG_LOG("aggregator.surface.log", "D%d - %s, %s draws=%s",
2586 static_cast<int>(referenced_surfaces_.size()),
2587 surface->surface_id().ToString().c_str(),
2588 surface->size_in_pixels().ToString().c_str(),
2589 will_draw ? "true" : "false");