diff --git a/flow/compositor_context.cc b/flow/compositor_context.cc index 12e3ed9751ea8..cb119669d6430 100644 --- a/flow/compositor_context.cc +++ b/flow/compositor_context.cc @@ -43,9 +43,8 @@ std::optional FrameDamage::ComputeClipRect( context.ComputeDamage(additional_damage_, horizontal_clip_alignment_, vertical_clip_alignment_); return SkRect::Make(damage_->buffer_damage); - } else { - return std::nullopt; } + return std::nullopt; } CompositorContext::CompositorContext() @@ -125,10 +124,16 @@ RasterStatus CompositorContext::ScopedFrame::Raster( FrameDamage* frame_damage) { TRACE_EVENT0("flutter", "CompositorContext::ScopedFrame::Raster"); - std::optional clip_rect = - frame_damage - ? frame_damage->ComputeClipRect(layer_tree, !ignore_raster_cache) - : std::nullopt; + std::optional clip_rect; + if (frame_damage) { + clip_rect = frame_damage->ComputeClipRect(layer_tree, !ignore_raster_cache); + + if (aiks_context_ && + !ShouldPerformPartialRepaint(clip_rect, layer_tree.frame_size())) { + clip_rect = std::nullopt; + frame_damage->Reset(); + } + } bool root_needs_readback = layer_tree.Preroll( *this, ignore_raster_cache, clip_rect ? *clip_rect : kGiantRect); @@ -146,10 +151,22 @@ RasterStatus CompositorContext::ScopedFrame::Raster( return RasterStatus::kSkipAndRetry; } + if (aiks_context_) { + PaintLayerTreeImpeller(layer_tree, clip_rect, ignore_raster_cache); + } else { + PaintLayerTreeSkia(layer_tree, clip_rect, needs_save_layer, + ignore_raster_cache); + } + return RasterStatus::kSuccess; +} + +void CompositorContext::ScopedFrame::PaintLayerTreeSkia( + flutter::LayerTree& layer_tree, + std::optional clip_rect, + bool needs_save_layer, + bool ignore_raster_cache) { DlAutoCanvasRestore restore(canvas(), clip_rect.has_value()); - // Clearing canvas after preroll reduces one render target switch when preroll - // paints some raster cache. 
if (canvas()) { if (clip_rect) { canvas()->ClipRect(*clip_rect); @@ -164,9 +181,48 @@ RasterStatus CompositorContext::ScopedFrame::Raster( } canvas()->Clear(DlColor::kTransparent()); } - layer_tree.Paint(*this, ignore_raster_cache); + // The canvas()->Restore() is taken care of by the DlAutoCanvasRestore - return RasterStatus::kSuccess; + layer_tree.Paint(*this, ignore_raster_cache); +} + +void CompositorContext::ScopedFrame::PaintLayerTreeImpeller( + flutter::LayerTree& layer_tree, + std::optional clip_rect, + bool ignore_raster_cache) { + if (canvas() && clip_rect) { + canvas()->Translate(-clip_rect->x(), -clip_rect->y()); + } + + layer_tree.Paint(*this, ignore_raster_cache); +} + +/// @brief The max ratio of pixel width or height to size that is dirty which +/// results in a partial repaint. +/// +/// Performing a partial repaint has a small overhead - Impeller needs to +/// allocate a fairly large resolve texture for the root pass instead of +/// using the drawable texture, and a final blit must be performed. At a +/// minimum, if the damage rect is the entire buffer, we must not perform +/// a partial repaint. Beyond that, we could only experimentally +/// determine what this value should be. From looking at the Flutter +/// Gallery, we noticed that there are occasionally small partial +/// repaints which shave off trivial numbers of pixels. 
+constexpr float kImpellerRepaintRatio = 0.7f; + +bool CompositorContext::ShouldPerformPartialRepaint( + std::optional damage_rect, + SkISize layer_tree_size) { + if (!damage_rect.has_value()) { + return false; + } + if (damage_rect->width() >= layer_tree_size.width() && + damage_rect->height() >= layer_tree_size.height()) { + return false; + } + auto rx = damage_rect->width() / layer_tree_size.width(); + auto ry = damage_rect->height() / layer_tree_size.height(); + return rx <= kImpellerRepaintRatio || ry <= kImpellerRepaintRatio; } void CompositorContext::OnGrContextCreated() { diff --git a/flow/compositor_context.h b/flow/compositor_context.h index dbc053590a319..a82466ca91d01 100644 --- a/flow/compositor_context.h +++ b/flow/compositor_context.h @@ -91,15 +91,24 @@ class FrameDamage { // See Damage::buffer_damage. std::optional GetBufferDamage() { - return damage_ ? std::make_optional(damage_->buffer_damage) : std::nullopt; + return (damage_ && !ignore_damage_) + ? std::make_optional(damage_->buffer_damage) + : std::nullopt; } + // Remove reported buffer_damage to inform clients that a partial repaint + // should not be performed on this frame. + // frame_damage is required to correctly track accumulated damage for + // subsequent frames. 
+ void Reset() { ignore_damage_ = true; } + private: SkIRect additional_damage_ = SkIRect::MakeEmpty(); std::optional damage_; const LayerTree* prev_layer_tree_ = nullptr; int vertical_clip_alignment_ = 1; int horizontal_clip_alignment_ = 1; + bool ignore_damage_ = false; }; class CompositorContext { @@ -144,6 +153,15 @@ class CompositorContext { FrameDamage* frame_damage); private: + void PaintLayerTreeSkia(flutter::LayerTree& layer_tree, + std::optional clip_rect, + bool needs_save_layer, + bool ignore_raster_cache); + + void PaintLayerTreeImpeller(flutter::LayerTree& layer_tree, + std::optional clip_rect, + bool ignore_raster_cache); + CompositorContext& context_; GrDirectContext* gr_context_; DlCanvas* canvas_; @@ -205,6 +223,12 @@ class CompositorContext { void EndFrame(ScopedFrame& frame, bool enable_instrumentation); + /// @brief Whether Impeller should attempt a partial repaint. + /// The Impeller backend requires an additional blit pass, which may + /// not be worthwhile if the damage region is large. 
+ static bool ShouldPerformPartialRepaint(std::optional damage_rect, + SkISize layer_tree_size); + FML_DISALLOW_COPY_AND_ASSIGN(CompositorContext); }; diff --git a/impeller/playground/backend/metal/playground_impl_mtl.mm b/impeller/playground/backend/metal/playground_impl_mtl.mm index 594f8683c72b1..033824b11383b 100644 --- a/impeller/playground/backend/metal/playground_impl_mtl.mm +++ b/impeller/playground/backend/metal/playground_impl_mtl.mm @@ -115,7 +115,9 @@ data_->metal_layer.drawableSize = CGSizeMake(layer_size.width * scale.x, layer_size.height * scale.y); - return SurfaceMTL::WrapCurrentMetalLayerDrawable(context, data_->metal_layer); + auto drawable = + SurfaceMTL::GetMetalDrawableAndValidate(context, data_->metal_layer); + return SurfaceMTL::WrapCurrentMetalLayerDrawable(context, drawable); } } // namespace impeller diff --git a/impeller/renderer/backend/metal/surface_mtl.h b/impeller/renderer/backend/metal/surface_mtl.h index 157e4e6e84454..72bdd3e70e3c1 100644 --- a/impeller/renderer/backend/metal/surface_mtl.h +++ b/impeller/renderer/backend/metal/surface_mtl.h @@ -7,6 +7,7 @@ #include #include "flutter/fml/macros.h" +#include "impeller/geometry/rect.h" #include "impeller/renderer/context.h" #include "impeller/renderer/surface.h" @@ -32,9 +33,14 @@ class SurfaceMTL final : public Surface { /// /// @return A pointer to the wrapped surface or null. 
/// - static std::unique_ptr WrapCurrentMetalLayerDrawable( + static id GetMetalDrawableAndValidate( const std::shared_ptr& context, CAMetalLayer* layer); + + static std::unique_ptr WrapCurrentMetalLayerDrawable( + const std::shared_ptr& context, + id drawable, + std::optional clip_rect = std::nullopt); #pragma GCC diagnostic pop // |Surface| @@ -42,16 +48,24 @@ class SurfaceMTL final : public Surface { id drawable() const { return drawable_; } + // |Surface| + bool Present() const override; + private: std::weak_ptr context_; - id drawable_ = nil; + std::shared_ptr resolve_texture_; + id drawable_ = nil; + bool requires_blit_ = false; + std::optional clip_rect_; + + static bool ShouldPerformPartialRepaint(std::optional damage_rect); SurfaceMTL(const std::weak_ptr& context, const RenderTarget& target, - id drawable); - - // |Surface| - bool Present() const override; + std::shared_ptr resolve_texture, + id drawable, + bool requires_blit, + std::optional clip_rect); FML_DISALLOW_COPY_AND_ASSIGN(SurfaceMTL); }; diff --git a/impeller/renderer/backend/metal/surface_mtl.mm b/impeller/renderer/backend/metal/surface_mtl.mm index 59ca1c0009c78..c6d6a265472e2 100644 --- a/impeller/renderer/backend/metal/surface_mtl.mm +++ b/impeller/renderer/backend/metal/surface_mtl.mm @@ -5,6 +5,7 @@ #include "impeller/renderer/backend/metal/surface_mtl.h" #include "flutter/fml/trace_event.h" +#include "flutter/impeller/renderer/command_buffer.h" #include "impeller/base/validation.h" #include "impeller/renderer/backend/metal/context_mtl.h" #include "impeller/renderer/backend/metal/formats_mtl.h" @@ -16,7 +17,7 @@ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunguarded-availability-new" -std::unique_ptr SurfaceMTL::WrapCurrentMetalLayerDrawable( +id SurfaceMTL::GetMetalDrawableAndValidate( const std::shared_ptr& context, CAMetalLayer* layer) { TRACE_EVENT0("impeller", "SurfaceMTL::WrapCurrentMetalLayerDrawable"); @@ -35,23 +36,37 @@ VALIDATION_LOG << "Could not acquire 
current drawable."; return nullptr; } + return current_drawable; +} - const auto color_format = - FromMTLPixelFormat(current_drawable.texture.pixelFormat); +std::unique_ptr SurfaceMTL::WrapCurrentMetalLayerDrawable( + const std::shared_ptr& context, + id drawable, + std::optional clip_rect) { + bool requires_blit = ShouldPerformPartialRepaint(clip_rect); + const auto color_format = FromMTLPixelFormat(drawable.texture.pixelFormat); if (color_format == PixelFormat::kUnknown) { VALIDATION_LOG << "Unknown drawable color format."; return nullptr; } + // compositor_context.cc will offset the rendering by the clip origin. Here we + // shrink to the size of the clip. This has the same effect as clipping the + // rendering but also creates smaller intermediate passes. + ISize root_size; + if (requires_blit) { + root_size = ISize(clip_rect->size.width, clip_rect->size.height); + } else { + root_size = {static_cast(drawable.texture.width), + static_cast(drawable.texture.height)}; + } TextureDescriptor msaa_tex_desc; msaa_tex_desc.storage_mode = StorageMode::kDeviceTransient; msaa_tex_desc.type = TextureType::kTexture2DMultisample; msaa_tex_desc.sample_count = SampleCount::kCount4; msaa_tex_desc.format = color_format; - msaa_tex_desc.size = { - static_cast(current_drawable.texture.width), - static_cast(current_drawable.texture.height)}; + msaa_tex_desc.size = root_size; msaa_tex_desc.usage = static_cast(TextureUsage::kRenderTarget); auto msaa_tex = context->GetResourceAllocator()->CreateTexture(msaa_tex_desc); @@ -68,8 +83,17 @@ resolve_tex_desc.sample_count = SampleCount::kCount1; resolve_tex_desc.storage_mode = StorageMode::kDevicePrivate; - std::shared_ptr resolve_tex = - std::make_shared(resolve_tex_desc, current_drawable.texture); + // Create color resolve texture. 
+ std::shared_ptr resolve_tex; + if (requires_blit) { + resolve_tex_desc.compression_type = CompressionType::kLossy; + resolve_tex = + context->GetResourceAllocator()->CreateTexture(resolve_tex_desc); + } else { + resolve_tex = + std::make_shared(resolve_tex_desc, drawable.texture); + } + if (!resolve_tex) { VALIDATION_LOG << "Could not wrap resolve texture."; return nullptr; @@ -112,18 +136,42 @@ render_target_desc.SetStencilAttachment(stencil0); // The constructor is private. So make_unique may not be used. - return std::unique_ptr(new SurfaceMTL( - context->weak_from_this(), render_target_desc, current_drawable)); + return std::unique_ptr( + new SurfaceMTL(context->weak_from_this(), render_target_desc, resolve_tex, + drawable, requires_blit, clip_rect)); } SurfaceMTL::SurfaceMTL(const std::weak_ptr& context, const RenderTarget& target, - id drawable) - : Surface(target), context_(context), drawable_(drawable) {} + std::shared_ptr resolve_texture, + id drawable, + bool requires_blit, + std::optional clip_rect) + : Surface(target), + context_(context), + resolve_texture_(std::move(resolve_texture)), + drawable_(drawable), + requires_blit_(requires_blit), + clip_rect_(clip_rect) {} // |Surface| SurfaceMTL::~SurfaceMTL() = default; +bool SurfaceMTL::ShouldPerformPartialRepaint(std::optional damage_rect) { + // compositor_context.cc will conditionally disable partial repaint if the + // damage region is large. If that happened, then a nullopt damage rect + // will be provided here. + if (!damage_rect.has_value()) { + return false; + } + // If the damage rect is 0 in at least one dimension, partial repaint isn't + // performed as we skip right to present. 
+ if (damage_rect->size.width <= 0 || damage_rect->size.height <= 0) { + return false; + } + return true; +} + // |Surface| bool SurfaceMTL::Present() const { if (drawable_ == nil) { @@ -135,6 +183,21 @@ return false; } + if (requires_blit_) { + auto blit_command_buffer = context->CreateCommandBuffer(); + if (!blit_command_buffer) { + return false; + } + auto blit_pass = blit_command_buffer->CreateBlitPass(); + auto current = TextureMTL::Wrapper({}, drawable_.texture); + blit_pass->AddCopy(resolve_texture_, current, std::nullopt, + clip_rect_->origin); + blit_pass->EncodeCommands(context->GetResourceAllocator()); + if (!blit_command_buffer->SubmitCommands()) { + return false; + } + } + // If a transaction is present, `presentDrawable` will present too early. And // so we wait on an empty command buffer to get scheduled instead, which // forces us to also wait for all of the previous command buffers in the queue diff --git a/shell/gpu/gpu_surface_metal_impeller.h b/shell/gpu/gpu_surface_metal_impeller.h index 71a11013218b7..bc4da2c715d84 100644 --- a/shell/gpu/gpu_surface_metal_impeller.h +++ b/shell/gpu/gpu_surface_metal_impeller.h @@ -34,6 +34,10 @@ class SK_API_AVAILABLE_CA_METAL_LAYER GPUSurfaceMetalImpeller : public Surface { std::shared_ptr impeller_renderer_; std::shared_ptr aiks_context_; fml::scoped_nsprotocol> last_drawable_; + bool disable_partial_repaint_ = false; + // Accumulated damage for each framebuffer; Key is address of underlying + // MTLTexture for each drawable + std::map damage_; // |Surface| std::unique_ptr AcquireFrame(const SkISize& size) override; diff --git a/shell/gpu/gpu_surface_metal_impeller.mm b/shell/gpu/gpu_surface_metal_impeller.mm index f9f51311c8d12..ee503202c6de1 100644 --- a/shell/gpu/gpu_surface_metal_impeller.mm +++ b/shell/gpu/gpu_surface_metal_impeller.mm @@ -33,7 +33,14 @@ : delegate_(delegate), impeller_renderer_(CreateImpellerRenderer(context)), aiks_context_( - std::make_shared(impeller_renderer_ ? 
context : nullptr)) {} + std::make_shared(impeller_renderer_ ? context : nullptr)) { + // If this preference is explicitly set, we allow for disabling partial repaint. + NSNumber* disablePartialRepaint = + [[NSBundle mainBundle] objectForInfoDictionaryKey:@"FLTDisablePartialRepaint"]; + if (disablePartialRepaint != nil) { + disable_partial_repaint_ = disablePartialRepaint.boolValue; + } +} GPUSurfaceMetalImpeller::~GPUSurfaceMetalImpeller() = default; @@ -59,16 +66,18 @@ auto* mtl_layer = (CAMetalLayer*)layer; - auto surface = impeller::SurfaceMTL::WrapCurrentMetalLayerDrawable( + auto drawable = impeller::SurfaceMTL::GetMetalDrawableAndValidate( impeller_renderer_->GetContext(), mtl_layer); if (Settings::kSurfaceDataAccessible) { - last_drawable_.reset([surface->drawable() retain]); + last_drawable_.reset([drawable retain]); } + id metal_drawable = static_cast>(last_drawable_); SurfaceFrame::SubmitCallback submit_callback = - fml::MakeCopyable([renderer = impeller_renderer_, // + fml::MakeCopyable([this, // + renderer = impeller_renderer_, // aiks_context = aiks_context_, // - surface = std::move(surface) // + metal_drawable // ](SurfaceFrame& surface_frame, DlCanvas* canvas) mutable -> bool { if (!aiks_context) { return false; @@ -80,6 +89,35 @@ return false; } + if (!disable_partial_repaint_) { + uintptr_t texture = reinterpret_cast(metal_drawable.texture); + + for (auto& entry : damage_) { + if (entry.first != texture) { + // Accumulate damage for other framebuffers + if (surface_frame.submit_info().frame_damage) { + entry.second.join(*surface_frame.submit_info().frame_damage); + } + } + } + // Reset accumulated damage for current framebuffer + damage_[texture] = SkIRect::MakeEmpty(); + } + + std::optional clip_rect; + if (surface_frame.submit_info().buffer_damage.has_value()) { + auto buffer_damage = surface_frame.submit_info().buffer_damage; + clip_rect = impeller::IRect::MakeXYWH(buffer_damage->x(), buffer_damage->y(), + buffer_damage->width(), 
buffer_damage->height()); + } + + auto surface = impeller::SurfaceMTL::WrapCurrentMetalLayerDrawable( + impeller_renderer_->GetContext(), metal_drawable, clip_rect); + + if (clip_rect && (clip_rect->size.width == 0 || clip_rect->size.height == 0)) { + return surface->Present(); + } + impeller::DlDispatcher impeller_dispatcher; display_list->Dispatch(impeller_dispatcher); auto picture = impeller_dispatcher.EndRecordingAsPicture(); @@ -92,12 +130,26 @@ })); }); - return std::make_unique(nullptr, // surface - SurfaceFrame::FramebufferInfo{}, // framebuffer info - submit_callback, // submit callback - frame_info, // frame size - nullptr, // context result - true // display list fallback + SurfaceFrame::FramebufferInfo framebuffer_info; + framebuffer_info.supports_readback = true; + + if (!disable_partial_repaint_) { + // Provide accumulated damage to rasterizer (area in current framebuffer that lags behind + // front buffer) + uintptr_t texture = reinterpret_cast(metal_drawable.texture); + auto i = damage_.find(texture); + if (i != damage_.end()) { + framebuffer_info.existing_damage = i->second; + } + framebuffer_info.supports_partial_repaint = true; + } + + return std::make_unique(nullptr, // surface + framebuffer_info, // framebuffer info + submit_callback, // submit callback + frame_info, // frame size + nullptr, // context result + true // display list fallback ); }