Diffstat: 215 files changed, 5274 insertions, 2928 deletions
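The largest functional change in this diff is to the canvas recording path in gfx/2d: DrawTargetRecording gains a DrawSurfaceDescriptor entry point plus an opt-in mOptimizeTransform mode in which SetTransform no longer records an event immediately; instead FlushTransform() emits a RecordedSetTransform lazily, just before the next recorded event that actually needs the transform, and only if it differs from the last transform recorded. A minimal standalone sketch of that deferred-flush idea, using simplified stand-in types rather than the actual Gecko classes:

// Simplified illustration of the mOptimizeTransform / FlushTransform pattern.
// Matrix and RecordingTarget are stand-ins, not the Gecko types.
#include <iostream>
#include <string>
#include <vector>

struct Matrix {
  float a, b, c, d, tx, ty;
  bool ExactlyEquals(const Matrix& o) const {
    return a == o.a && b == o.b && c == o.c && d == o.d && tx == o.tx &&
           ty == o.ty;
  }
};

class RecordingTarget {
 public:
  void SetTransform(const Matrix& aTransform) {
    mTransform = aTransform;
    mTransformDirty = true;  // defer; do not record an event yet
  }

  void FillRect(const std::string& aRect) {
    FlushTransform();  // record the transform only when a draw depends on it
    mEvents.push_back("FillRect " + aRect);
  }

  const std::vector<std::string>& Events() const { return mEvents; }

 private:
  void FlushTransform() {
    if (mTransformDirty) {
      if (!mRecordedTransform.ExactlyEquals(mTransform)) {
        mEvents.push_back("SetTransform");
        mRecordedTransform = mTransform;
      }
      mTransformDirty = false;
    }
  }

  std::vector<std::string> mEvents;
  Matrix mTransform{1, 0, 0, 1, 0, 0};
  Matrix mRecordedTransform{1, 0, 0, 1, 0, 0};
  bool mTransformDirty = false;
};

int main() {
  RecordingTarget dt;
  dt.SetTransform(Matrix{2, 0, 0, 2, 0, 0});
  dt.SetTransform(Matrix{1, 0, 0, 1, 0, 0});  // back to the recorded value
  dt.FillRect("10x10");                       // records only "FillRect 10x10"
  for (const auto& e : dt.Events()) std::cout << e << "\n";
}

In the sketch, setting the transform twice between draws records at most one SetTransform event, and none at all if the value ends up equal to what was already recorded.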
diff --git a/gfx/2d/2D.h b/gfx/2d/2D.h
index e6b94f86ea..f16a0ee0cc 100644
--- a/gfx/2d/2D.h
+++ b/gfx/2d/2D.h
@@ -85,7 +85,9 @@ namespace mozilla {
 class Mutex;
 
 namespace layers {
+class Image;
 class MemoryOrShmem;
+class SurfaceDescriptor;
 class SurfaceDescriptorBuffer;
 class TextureData;
 }  // namespace layers
@@ -1416,6 +1418,15 @@ class DrawTarget : public external::AtomicRefCounted<DrawTarget> {
       const DrawSurfaceOptions& aSurfOptions = DrawSurfaceOptions(),
       const DrawOptions& aOptions = DrawOptions()) = 0;
 
+  virtual void DrawSurfaceDescriptor(
+      const layers::SurfaceDescriptor& aDesc,
+      const RefPtr<layers::Image>& aImageOfSurfaceDescriptor, const Rect& aDest,
+      const Rect& aSource,
+      const DrawSurfaceOptions& aSurfOptions = DrawSurfaceOptions(),
+      const DrawOptions& aOptions = DrawOptions()) {
+    MOZ_CRASH("GFX: DrawSurfaceDescriptor");
+  }
+
   /**
    * Draw a surface to the draw target, when the surface will be available
    * at a later time. This is only valid for recording DrawTargets.
@@ -1981,7 +1992,7 @@ class DrawTarget : public external::AtomicRefCounted<DrawTarget> {
   UserData mUserData;
   Matrix mTransform;
   IntRect mOpaqueRect;
-  bool mTransformDirty : 1;
+  mutable bool mTransformDirty : 1;
   bool mPermitSubpixelAA : 1;
   SurfaceFormat mFormat;
diff --git a/gfx/2d/DrawEventRecorder.cpp b/gfx/2d/DrawEventRecorder.cpp
index 51f8b836ec..b24b158f56 100644
--- a/gfx/2d/DrawEventRecorder.cpp
+++ b/gfx/2d/DrawEventRecorder.cpp
@@ -33,6 +33,13 @@ void DrawEventRecorderPrivate::StoreExternalSurfaceRecording(
   mExternalSurfaces.push_back({aSurface});
 }
 
+void DrawEventRecorderPrivate::StoreExternalImageRecording(
+    const RefPtr<layers::Image>& aImageOfSurfaceDescriptor) {
+  NS_ASSERT_OWNINGTHREAD(DrawEventRecorderPrivate);
+
+  mExternalImages.push_back({aImageOfSurfaceDescriptor});
+}
+
 void DrawEventRecorderPrivate::StoreSourceSurfaceRecording(
     SourceSurface* aSurface, const char* aReason) {
   NS_ASSERT_OWNINGTHREAD(DrawEventRecorderPrivate);
diff --git a/gfx/2d/DrawEventRecorder.h b/gfx/2d/DrawEventRecorder.h
index d62f098784..c099973fbd 100644
--- a/gfx/2d/DrawEventRecorder.h
+++ b/gfx/2d/DrawEventRecorder.h
@@ -15,6 +15,7 @@
 #include <functional>
 #include <vector>
 
+#include "ImageContainer.h"
 #include "mozilla/DataMutex.h"
 #include "mozilla/ThreadSafeWeakPtr.h"
 #include "nsTHashMap.h"
@@ -83,7 +84,8 @@ class DrawEventRecorderPrivate : public DrawEventRecorder {
 
   virtual void RecordEvent(const RecordedEvent& aEvent) = 0;
 
-  void RecordEvent(DrawTargetRecording* aDT, const RecordedEvent& aEvent) {
+  void RecordEvent(const DrawTargetRecording* aDT,
+                   const RecordedEvent& aEvent) {
     ReferencePtr dt = aDT;
     if (mCurrentDT != dt) {
       SetDrawTarget(dt);
@@ -93,7 +95,7 @@ class DrawEventRecorderPrivate : public DrawEventRecorder {
 
   void SetDrawTarget(ReferencePtr aDT);
 
-  void ClearDrawTarget(DrawTargetRecording* aDT) {
+  void ClearDrawTarget(const DrawTargetRecording* aDT) {
     ReferencePtr dt = aDT;
     if (mCurrentDT == dt) {
       mCurrentDT = nullptr;
@@ -185,6 +187,12 @@ class DrawEventRecorderPrivate : public DrawEventRecorder {
   virtual void StoreSourceSurfaceRecording(SourceSurface* aSurface,
                                            const char* aReason);
 
+  virtual void StoreImageRecording(
+      const RefPtr<layers::Image>& aImageOfSurfaceDescriptor,
+      const char* aReasony) {
+    MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+  }
+
   /**
    * Used when a source surface is destroyed, aSurface is a void* instead of a
    * SourceSurface* because this is called during the SourceSurface destructor,
@@ -209,11 +217,21 @@ class DrawEventRecorderPrivate : public DrawEventRecorder {
     aSurfaces = std::move(mExternalSurfaces);
   }
 
+  struct ExternalImageEntry {
+    RefPtr<layers::Image> mImage;
+    int64_t mEventCount = -1;
+  };
+
+  using ExternalImagesHolder = std::deque<ExternalImageEntry>;
+
  protected:
   NS_DECL_OWNINGTHREAD
 
   void StoreExternalSurfaceRecording(SourceSurface* aSurface, uint64_t aKey);
 
+  void StoreExternalImageRecording(
+      const RefPtr<layers::Image>& aImageOfSurfaceDescriptor);
+
   void ProcessPendingDeletions() {
     NS_ASSERT_OWNINGTHREAD(DrawEventRecorderPrivate);
@@ -253,6 +271,7 @@ class DrawEventRecorderPrivate : public DrawEventRecorder {
   ReferencePtr mCurrentDT;
 
   ExternalSurfacesHolder mExternalSurfaces;
+  ExternalImagesHolder mExternalImages;
 
   bool mExternalFonts;
 };
diff --git a/gfx/2d/DrawTargetRecording.cpp b/gfx/2d/DrawTargetRecording.cpp
index 6edc8cdf91..cdabb80ecd 100644
--- a/gfx/2d/DrawTargetRecording.cpp
+++ b/gfx/2d/DrawTargetRecording.cpp
@@ -9,6 +9,7 @@
 #include "PathRecording.h"
 #include <stdio.h>
 
+#include "ImageContainer.h"
 #include "Logging.h"
 #include "Tools.h"
 #include "Filters.h"
@@ -199,7 +200,7 @@ DrawTargetRecording::DrawTargetRecording(
     : mRecorder(static_cast<DrawEventRecorderPrivate*>(aRecorder)),
       mFinalDT(aDT),
       mRect(IntPoint(0, 0), aSize) {
-  mRecorder->RecordEvent(layers::RecordedCanvasDrawTargetCreation(
+  RecordEventSkipFlushTransform(layers::RecordedCanvasDrawTargetCreation(
       this, aTextureId, aTextureOwnerId, mFinalDT->GetBackendType(), aSize,
       mFinalDT->GetFormat()));
   mFormat = mFinalDT->GetFormat();
@@ -214,7 +215,7 @@ DrawTargetRecording::DrawTargetRecording(DrawEventRecorder* aRecorder,
       mRect(aRect) {
   MOZ_DIAGNOSTIC_ASSERT(aRecorder->GetRecorderType() != RecorderType::CANVAS);
   RefPtr<SourceSurface> snapshot = aHasData ? mFinalDT->Snapshot() : nullptr;
-  mRecorder->RecordEvent(
+  RecordEventSkipFlushTransform(
       RecordedDrawTargetCreation(this, mFinalDT->GetBackendType(), mRect,
                                  mFinalDT->GetFormat(), aHasData, snapshot));
   mFormat = mFinalDT->GetFormat();
@@ -229,21 +230,22 @@ DrawTargetRecording::DrawTargetRecording(const DrawTargetRecording* aDT,
 }
 
 DrawTargetRecording::~DrawTargetRecording() {
-  mRecorder->RecordEvent(RecordedDrawTargetDestruction(ReferencePtr(this)));
+  RecordEventSkipFlushTransform(
+      RecordedDrawTargetDestruction(ReferencePtr(this)));
   mRecorder->ClearDrawTarget(this);
 }
 
 void DrawTargetRecording::Link(const char* aDestination, const Rect& aRect) {
   MarkChanged();
 
-  mRecorder->RecordEvent(this, RecordedLink(aDestination, aRect));
+  RecordEventSelf(RecordedLink(aDestination, aRect));
 }
 
 void DrawTargetRecording::Destination(const char* aDestination,
                                       const Point& aPoint) {
   MarkChanged();
 
-  mRecorder->RecordEvent(this, RecordedDestination(aDestination, aPoint));
+  RecordEventSelf(RecordedDestination(aDestination, aPoint));
 }
 
 void DrawTargetRecording::FillRect(const Rect& aRect, const Pattern& aPattern,
@@ -252,7 +254,7 @@ void DrawTargetRecording::FillRect(const Rect& aRect, const Pattern& aPattern,
 
   EnsurePatternDependenciesStored(aPattern);
 
-  mRecorder->RecordEvent(this, RecordedFillRect(aRect, aPattern, aOptions));
+  RecordEventSelf(RecordedFillRect(aRect, aPattern, aOptions));
 }
 
 void DrawTargetRecording::StrokeRect(const Rect& aRect, const Pattern& aPattern,
@@ -262,8 +264,8 @@ void DrawTargetRecording::StrokeRect(const Rect& aRect, const Pattern& aPattern,
 
   EnsurePatternDependenciesStored(aPattern);
 
-  mRecorder->RecordEvent(
-      this, RecordedStrokeRect(aRect, aPattern, aStrokeOptions, aOptions));
+  RecordEventSelf(
+      RecordedStrokeRect(aRect, aPattern, aStrokeOptions, aOptions));
 }
 
 void
DrawTargetRecording::StrokeLine(const Point& aBegin, const Point& aEnd, @@ -274,8 +276,8 @@ void DrawTargetRecording::StrokeLine(const Point& aBegin, const Point& aEnd, EnsurePatternDependenciesStored(aPattern); - mRecorder->RecordEvent(this, RecordedStrokeLine(aBegin, aEnd, aPattern, - aStrokeOptions, aOptions)); + RecordEventSelf( + RecordedStrokeLine(aBegin, aEnd, aPattern, aStrokeOptions, aOptions)); } void DrawTargetRecording::Fill(const Path* aPath, const Pattern& aPattern, @@ -291,16 +293,14 @@ void DrawTargetRecording::Fill(const Path* aPath, const Pattern& aPattern, auto circle = path->AsCircle(); if (circle) { EnsurePatternDependenciesStored(aPattern); - mRecorder->RecordEvent( - this, RecordedFillCircle(circle.value(), aPattern, aOptions)); + RecordEventSelf(RecordedFillCircle(circle.value(), aPattern, aOptions)); return; } } RefPtr<PathRecording> pathRecording = EnsurePathStored(aPath); EnsurePatternDependenciesStored(aPattern); - - mRecorder->RecordEvent(this, RecordedFill(pathRecording, aPattern, aOptions)); + RecordEventSelf(RecordedFill(pathRecording, aPattern, aOptions)); } struct RecordingFontUserData { @@ -345,7 +345,7 @@ void DrawTargetRecording::DrawGlyphs(ScaledFont* aFont, // playback. RecordedFontDescriptor fontDesc(unscaledFont); if (fontDesc.IsValid()) { - mRecorder->RecordEvent(fontDesc); + RecordEventSkipFlushTransform(fontDesc); } else { RecordedFontData fontData(unscaledFont); RecordedFontDetails fontDetails; @@ -353,10 +353,10 @@ void DrawTargetRecording::DrawGlyphs(ScaledFont* aFont, // Try to serialise the whole font, just in case this is a web font // that is not present on the system. if (!mRecorder->HasStoredFontData(fontDetails.fontDataKey)) { - mRecorder->RecordEvent(fontData); + RecordEventSkipFlushTransform(fontData); mRecorder->AddStoredFontData(fontDetails.fontDataKey); } - mRecorder->RecordEvent( + RecordEventSkipFlushTransform( RecordedUnscaledFontCreation(unscaledFont, fontDetails)); } else { gfxWarning() << "DrawTargetRecording::FillGlyphs failed to serialise " @@ -364,7 +364,8 @@ void DrawTargetRecording::DrawGlyphs(ScaledFont* aFont, } } } - mRecorder->RecordEvent(RecordedScaledFontCreation(aFont, unscaledFont)); + RecordEventSkipFlushTransform( + RecordedScaledFontCreation(aFont, unscaledFont)); RecordingFontUserData* userData = new RecordingFontUserData; userData->refPtr = aFont; userData->unscaledFont = unscaledFont; @@ -375,13 +376,12 @@ void DrawTargetRecording::DrawGlyphs(ScaledFont* aFont, } if (aStrokeOptions) { - mRecorder->RecordEvent( - this, RecordedStrokeGlyphs(aFont, aPattern, *aStrokeOptions, aOptions, - aBuffer.mGlyphs, aBuffer.mNumGlyphs)); + RecordEventSelf(RecordedStrokeGlyphs(aFont, aPattern, *aStrokeOptions, + aOptions, aBuffer.mGlyphs, + aBuffer.mNumGlyphs)); } else { - mRecorder->RecordEvent( - this, RecordedFillGlyphs(aFont, aPattern, aOptions, aBuffer.mGlyphs, - aBuffer.mNumGlyphs)); + RecordEventSelf(RecordedFillGlyphs(aFont, aPattern, aOptions, + aBuffer.mGlyphs, aBuffer.mNumGlyphs)); } } @@ -407,7 +407,7 @@ void DrawTargetRecording::Mask(const Pattern& aSource, const Pattern& aMask, EnsurePatternDependenciesStored(aSource); EnsurePatternDependenciesStored(aMask); - mRecorder->RecordEvent(this, RecordedMask(aSource, aMask, aOptions)); + RecordEventSelf(RecordedMask(aSource, aMask, aOptions)); } void DrawTargetRecording::MaskSurface(const Pattern& aSource, @@ -422,8 +422,7 @@ void DrawTargetRecording::MaskSurface(const Pattern& aSource, EnsurePatternDependenciesStored(aSource); EnsureSurfaceStoredRecording(mRecorder, 
aMask, "MaskSurface"); - mRecorder->RecordEvent( - this, RecordedMaskSurface(aSource, aMask, aOffset, aOptions)); + RecordEventSelf(RecordedMaskSurface(aSource, aMask, aOffset, aOptions)); } void DrawTargetRecording::Stroke(const Path* aPath, const Pattern& aPattern, @@ -436,18 +435,16 @@ void DrawTargetRecording::Stroke(const Path* aPath, const Pattern& aPattern, auto circle = path->AsCircle(); if (circle && circle->closed) { EnsurePatternDependenciesStored(aPattern); - mRecorder->RecordEvent( - this, RecordedStrokeCircle(circle.value(), aPattern, aStrokeOptions, - aOptions)); + RecordEventSelf(RecordedStrokeCircle(circle.value(), aPattern, + aStrokeOptions, aOptions)); return; } auto line = path->AsLine(); if (line) { EnsurePatternDependenciesStored(aPattern); - mRecorder->RecordEvent( - this, RecordedStrokeLine(line->origin, line->destination, aPattern, - aStrokeOptions, aOptions)); + RecordEventSelf(RecordedStrokeLine(line->origin, line->destination, + aPattern, aStrokeOptions, aOptions)); return; } } @@ -455,8 +452,8 @@ void DrawTargetRecording::Stroke(const Path* aPath, const Pattern& aPattern, RefPtr<PathRecording> pathRecording = EnsurePathStored(aPath); EnsurePatternDependenciesStored(aPattern); - mRecorder->RecordEvent( - this, RecordedStroke(pathRecording, aPattern, aStrokeOptions, aOptions)); + RecordEventSelf( + RecordedStroke(pathRecording, aPattern, aStrokeOptions, aOptions)); } void DrawTargetRecording::DrawShadow(const Path* aPath, const Pattern& aPattern, @@ -468,9 +465,8 @@ void DrawTargetRecording::DrawShadow(const Path* aPath, const Pattern& aPattern, RefPtr<PathRecording> pathRecording = EnsurePathStored(aPath); EnsurePatternDependenciesStored(aPattern); - mRecorder->RecordEvent( - this, RecordedDrawShadow(pathRecording, aPattern, aShadow, aOptions, - aStrokeOptions)); + RecordEventSelf(RecordedDrawShadow(pathRecording, aPattern, aShadow, aOptions, + aStrokeOptions)); } void DrawTargetRecording::MarkChanged() { mIsDirty = true; } @@ -479,7 +475,7 @@ already_AddRefed<SourceSurface> DrawTargetRecording::Snapshot() { RefPtr<SourceSurface> retSurf = new SourceSurfaceRecording(mRect.Size(), mFormat, mRecorder); - mRecorder->RecordEvent(this, RecordedSnapshot(ReferencePtr(retSurf))); + RecordEventSelfSkipFlushTransform(RecordedSnapshot(ReferencePtr(retSurf))); return retSurf.forget(); } @@ -489,8 +485,8 @@ already_AddRefed<SourceSurface> DrawTargetRecording::IntoLuminanceSource( RefPtr<SourceSurface> retSurf = new SourceSurfaceRecording(mRect.Size(), SurfaceFormat::A8, mRecorder); - mRecorder->RecordEvent( - this, RecordedIntoLuminanceSource(retSurf, aLuminanceType, aOpacity)); + RecordEventSelfSkipFlushTransform( + RecordedIntoLuminanceSource(retSurf, aLuminanceType, aOpacity)); return retSurf.forget(); } @@ -508,11 +504,11 @@ already_AddRefed<SourceSurface> SourceSurfaceRecording::ExtractSubrect( } void DrawTargetRecording::Flush() { - mRecorder->RecordEvent(this, RecordedFlush()); + RecordEventSelfSkipFlushTransform(RecordedFlush()); } void DrawTargetRecording::DetachAllSnapshots() { - mRecorder->RecordEvent(this, RecordedDetachAllSnapshots()); + RecordEventSelfSkipFlushTransform(RecordedDetachAllSnapshots()); } void DrawTargetRecording::DrawSurface(SourceSurface* aSurface, @@ -527,8 +523,22 @@ void DrawTargetRecording::DrawSurface(SourceSurface* aSurface, EnsureSurfaceStoredRecording(mRecorder, aSurface, "DrawSurface"); - mRecorder->RecordEvent(this, RecordedDrawSurface(aSurface, aDest, aSource, - aSurfOptions, aOptions)); + RecordEventSelf( + 
RecordedDrawSurface(aSurface, aDest, aSource, aSurfOptions, aOptions)); +} + +void DrawTargetRecording::DrawSurfaceDescriptor( + const layers::SurfaceDescriptor& aDesc, + const RefPtr<layers::Image>& aImageOfSurfaceDescriptor, const Rect& aDest, + const Rect& aSource, const DrawSurfaceOptions& aSurfOptions, + const DrawOptions& aOptions) { + MarkChanged(); + + mRecorder->StoreImageRecording(aImageOfSurfaceDescriptor, + "DrawSurfaceDescriptor"); + + RecordEventSelf(RecordedDrawSurfaceDescriptor(aDesc, aDest, aSource, + aSurfOptions, aOptions)); } void DrawTargetRecording::DrawDependentSurface(uint64_t aId, @@ -536,7 +546,7 @@ void DrawTargetRecording::DrawDependentSurface(uint64_t aId, MarkChanged(); mRecorder->AddDependentSurface(aId); - mRecorder->RecordEvent(this, RecordedDrawDependentSurface(aId, aDest)); + RecordEventSelf(RecordedDrawDependentSurface(aId, aDest)); } void DrawTargetRecording::DrawSurfaceWithShadow(SourceSurface* aSurface, @@ -551,8 +561,7 @@ void DrawTargetRecording::DrawSurfaceWithShadow(SourceSurface* aSurface, EnsureSurfaceStoredRecording(mRecorder, aSurface, "DrawSurfaceWithShadow"); - mRecorder->RecordEvent( - this, RecordedDrawSurfaceWithShadow(aSurface, aDest, aShadow, aOp)); + RecordEventSelf(RecordedDrawSurfaceWithShadow(aSurface, aDest, aShadow, aOp)); } void DrawTargetRecording::DrawFilter(FilterNode* aNode, const Rect& aSourceRect, @@ -566,15 +575,14 @@ void DrawTargetRecording::DrawFilter(FilterNode* aNode, const Rect& aSourceRect, MOZ_ASSERT(mRecorder->HasStoredObject(aNode)); - mRecorder->RecordEvent( - this, RecordedDrawFilter(aNode, aSourceRect, aDestPoint, aOptions)); + RecordEventSelf(RecordedDrawFilter(aNode, aSourceRect, aDestPoint, aOptions)); } already_AddRefed<FilterNode> DrawTargetRecording::CreateFilter( FilterType aType) { RefPtr<FilterNode> retNode = new FilterNodeRecording(mRecorder); - mRecorder->RecordEvent(this, RecordedFilterNodeCreation(retNode, aType)); + RecordEventSelfSkipFlushTransform(RecordedFilterNodeCreation(retNode, aType)); return retNode.forget(); } @@ -582,7 +590,7 @@ already_AddRefed<FilterNode> DrawTargetRecording::CreateFilter( void DrawTargetRecording::ClearRect(const Rect& aRect) { MarkChanged(); - mRecorder->RecordEvent(this, RecordedClearRect(aRect)); + RecordEventSelf(RecordedClearRect(aRect)); } void DrawTargetRecording::CopySurface(SourceSurface* aSurface, @@ -596,8 +604,7 @@ void DrawTargetRecording::CopySurface(SourceSurface* aSurface, EnsureSurfaceStoredRecording(mRecorder, aSurface, "CopySurface"); - mRecorder->RecordEvent( - this, RecordedCopySurface(aSurface, aSourceRect, aDestination)); + RecordEventSelf(RecordedCopySurface(aSurface, aSourceRect, aDestination)); } void DrawTargetRecording::PushClip(const Path* aPath) { @@ -616,16 +623,15 @@ void DrawTargetRecording::PushClip(const Path* aPath) { } RefPtr<PathRecording> pathRecording = EnsurePathStored(aPath); - - mRecorder->RecordEvent(this, RecordedPushClip(ReferencePtr(pathRecording))); + RecordEventSelf(RecordedPushClip(ReferencePtr(pathRecording))); } void DrawTargetRecording::PushClipRect(const Rect& aRect) { - mRecorder->RecordEvent(this, RecordedPushClipRect(aRect)); + RecordEventSelf(RecordedPushClipRect(aRect)); } void DrawTargetRecording::PopClip() { - mRecorder->RecordEvent(this, RecordedPopClip()); + RecordEventSelfSkipFlushTransform(RecordedPopClip()); } void DrawTargetRecording::PushLayer(bool aOpaque, Float aOpacity, @@ -637,9 +643,8 @@ void DrawTargetRecording::PushLayer(bool aOpaque, Float aOpacity, EnsureSurfaceStoredRecording(mRecorder, 
aMask, "PushLayer"); } - mRecorder->RecordEvent( - this, RecordedPushLayer(aOpaque, aOpacity, aMask, aMaskTransform, aBounds, - aCopyBackground)); + RecordEventSelf(RecordedPushLayer(aOpaque, aOpacity, aMask, aMaskTransform, + aBounds, aCopyBackground)); PushedLayer layer(GetPermitSubpixelAA()); mPushedLayers.push_back(layer); @@ -656,9 +661,9 @@ void DrawTargetRecording::PushLayerWithBlend(bool aOpaque, Float aOpacity, EnsureSurfaceStoredRecording(mRecorder, aMask, "PushLayer"); } - mRecorder->RecordEvent(this, RecordedPushLayerWithBlend( - aOpaque, aOpacity, aMask, aMaskTransform, - aBounds, aCopyBackground, aCompositionOp)); + RecordEventSelf(RecordedPushLayerWithBlend(aOpaque, aOpacity, aMask, + aMaskTransform, aBounds, + aCopyBackground, aCompositionOp)); PushedLayer layer(GetPermitSubpixelAA()); mPushedLayers.push_back(layer); @@ -668,7 +673,7 @@ void DrawTargetRecording::PushLayerWithBlend(bool aOpaque, Float aOpacity, void DrawTargetRecording::PopLayer() { MarkChanged(); - mRecorder->RecordEvent(this, RecordedPopLayer()); + RecordEventSelfSkipFlushTransform(RecordedPopLayer()); const PushedLayer& layer = mPushedLayers.back(); DrawTarget::SetPermitSubpixelAA(layer.mOldPermitSubpixelAA); @@ -719,8 +724,8 @@ already_AddRefed<SourceSurface> DrawTargetRecording::OptimizeSourceSurface( RefPtr<SourceSurface> retSurf = new SourceSurfaceRecording( aSurface->GetSize(), aSurface->GetFormat(), mRecorder, aSurface); - mRecorder->RecordEvent(const_cast<DrawTargetRecording*>(this), - RecordedOptimizeSourceSurface(aSurface, retSurf)); + RecordEventSelfSkipFlushTransform( + RecordedOptimizeSourceSurface(aSurface, retSurf)); userData->optimizedSurface = retSurf; return retSurf.forget(); @@ -736,7 +741,6 @@ DrawTargetRecording::CreateSourceSurfaceFromNativeSurface( already_AddRefed<DrawTarget> DrawTargetRecording::CreateSimilarDrawTargetWithBacking( const IntSize& aSize, SurfaceFormat aFormat) const { - RefPtr<DrawTarget> similarDT; if (mFinalDT->CanCreateSimilarDrawTarget(aSize, aFormat)) { // If the requested similar draw target is too big, then we should try to // rasterize on the content side to avoid duplicating the effort when a @@ -763,12 +767,12 @@ DrawTargetRecording::CreateSimilarDrawTargetWithBacking( already_AddRefed<DrawTarget> DrawTargetRecording::CreateSimilarDrawTarget( const IntSize& aSize, SurfaceFormat aFormat) const { - RefPtr<DrawTarget> similarDT; + RefPtr<DrawTargetRecording> similarDT; if (mFinalDT->CanCreateSimilarDrawTarget(aSize, aFormat)) { similarDT = new DrawTargetRecording(this, IntRect(IntPoint(0, 0), aSize), aFormat); - mRecorder->RecordEvent( - const_cast<DrawTargetRecording*>(this), + similarDT->SetOptimizeTransform(mOptimizeTransform); + RecordEventSelfSkipFlushTransform( RecordedCreateSimilarDrawTarget(similarDT.get(), aSize, aFormat)); } else if (XRE_IsContentProcess()) { // Crash any content process that calls this function with arguments that @@ -789,11 +793,12 @@ bool DrawTargetRecording::CanCreateSimilarDrawTarget( RefPtr<DrawTarget> DrawTargetRecording::CreateClippedDrawTarget( const Rect& aBounds, SurfaceFormat aFormat) { - RefPtr<DrawTarget> similarDT; - similarDT = new DrawTargetRecording(this, mRect, aFormat); - mRecorder->RecordEvent( - this, RecordedCreateClippedDrawTarget(similarDT.get(), aBounds, aFormat)); - similarDT->SetTransform(mTransform); + RefPtr<DrawTargetRecording> similarDT = + new DrawTargetRecording(this, mRect, aFormat); + similarDT->SetOptimizeTransform(mOptimizeTransform); + RecordEventSelf( + 
RecordedCreateClippedDrawTarget(similarDT.get(), aBounds, aFormat)); + similarDT->mTransform = similarDT->mRecordedTransform = mTransform; return similarDT; } @@ -801,14 +806,16 @@ already_AddRefed<DrawTarget> DrawTargetRecording::CreateSimilarDrawTargetForFilter( const IntSize& aMaxSize, SurfaceFormat aFormat, FilterNode* aFilter, FilterNode* aSource, const Rect& aSourceRect, const Point& aDestPoint) { - RefPtr<DrawTarget> similarDT; + RefPtr<DrawTargetRecording> similarDT; if (mFinalDT->CanCreateSimilarDrawTarget(aMaxSize, aFormat)) { similarDT = new DrawTargetRecording(this, IntRect(IntPoint(0, 0), aMaxSize), aFormat); - mRecorder->RecordEvent( - this, RecordedCreateDrawTargetForFilter(similarDT.get(), aMaxSize, - aFormat, aFilter, aSource, - aSourceRect, aDestPoint)); + similarDT->SetOptimizeTransform(mOptimizeTransform); + // RecordedCreateDrawTargetForFilter::PlayEvent uses the transform, despite + // the fact that the underlying DrawTarget does not. + RecordEventSelf(RecordedCreateDrawTargetForFilter(similarDT.get(), aMaxSize, + aFormat, aFilter, aSource, + aSourceRect, aDestPoint)); } else if (XRE_IsContentProcess()) { // See CreateSimilarDrawTarget MOZ_CRASH( @@ -828,19 +835,22 @@ already_AddRefed<GradientStops> DrawTargetRecording::CreateGradientStops( GradientStop* aStops, uint32_t aNumStops, ExtendMode aExtendMode) const { RefPtr<GradientStops> retStops = new GradientStopsRecording(mRecorder); - mRecorder->RecordEvent( - const_cast<DrawTargetRecording*>(this), + RecordEventSelfSkipFlushTransform( RecordedGradientStopsCreation(retStops, aStops, aNumStops, aExtendMode)); return retStops.forget(); } void DrawTargetRecording::SetTransform(const Matrix& aTransform) { - if (mTransform.ExactlyEquals(aTransform)) { - return; - } DrawTarget::SetTransform(aTransform); - mRecorder->RecordEvent(this, RecordedSetTransform(aTransform)); + if (!mOptimizeTransform) { + FlushTransform(); + } +} + +void DrawTargetRecording::RecordTransform(const Matrix& aTransform) const { + RecordEventSelfSkipFlushTransform(RecordedSetTransform(aTransform)); + mRecordedTransform = aTransform; } void DrawTargetRecording::SetPermitSubpixelAA(bool aPermitSubpixelAA) { @@ -848,7 +858,8 @@ void DrawTargetRecording::SetPermitSubpixelAA(bool aPermitSubpixelAA) { return; } DrawTarget::SetPermitSubpixelAA(aPermitSubpixelAA); - mRecorder->RecordEvent(this, RecordedSetPermitSubpixelAA(aPermitSubpixelAA)); + RecordEventSelfSkipFlushTransform( + RecordedSetPermitSubpixelAA(aPermitSubpixelAA)); } already_AddRefed<PathRecording> DrawTargetRecording::EnsurePathStored( @@ -874,7 +885,7 @@ already_AddRefed<PathRecording> DrawTargetRecording::EnsurePathStored( // It's important that AddStoredObject or TryAddStoredObject is called before // this because that will run any pending processing required by recorded // objects that have been deleted off the main thread. - mRecorder->RecordEvent(this, RecordedPathCreation(pathRecording.get())); + RecordEventSelfSkipFlushTransform(RecordedPathCreation(pathRecording.get())); pathRecording->mStoredRecorders.push_back(mRecorder); return pathRecording.forget(); @@ -887,13 +898,16 @@ void DrawTargetRecording::FlushItem(const IntRect& aBounds) { // Reinitialize the recorder (FlushItem will write a new recording header) // Tell the new recording about our draw target // This code should match what happens in the DrawTargetRecording constructor. 
-  MOZ_DIAGNOSTIC_ASSERT(mRecorder->GetRecorderType() != RecorderType::CANVAS);
-  mRecorder->RecordEvent(
+  MOZ_DIAGNOSTIC_ASSERT(mRecorder->GetRecorderType() ==
+                        RecorderType::WEBRENDER);
+  RecordEventSkipFlushTransform(
       RecordedDrawTargetCreation(this, mFinalDT->GetBackendType(), mRect,
                                  mFinalDT->GetFormat(), false, nullptr));
-  // Add the current transform to the new recording
-  mRecorder->RecordEvent(this,
-                         RecordedSetTransform(DrawTarget::GetTransform()));
+  // RecordedDrawTargetCreation can actually reuse the base DrawTarget for the
+  // recording, but we cannot conclude that from here, so force the transform
+  // to be recorded.
+  RecordTransform(mTransform);
+  mTransformDirty = false;
 }
 
 void DrawTargetRecording::EnsurePatternDependenciesStored(
diff --git a/gfx/2d/DrawTargetRecording.h b/gfx/2d/DrawTargetRecording.h
index 239d7ccfd4..5b24bd2987 100644
--- a/gfx/2d/DrawTargetRecording.h
+++ b/gfx/2d/DrawTargetRecording.h
@@ -18,7 +18,7 @@ struct RemoteTextureOwnerId;
 
 namespace gfx {
 
-class DrawTargetRecording : public DrawTarget {
+class DrawTargetRecording final : public DrawTarget {
  public:
   MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(DrawTargetRecording, override)
   DrawTargetRecording(DrawEventRecorder* aRecorder, DrawTarget* aDT,
@@ -71,6 +71,13 @@ class DrawTargetRecording : public DrawTarget {
       const DrawSurfaceOptions& aSurfOptions = DrawSurfaceOptions(),
       const DrawOptions& aOptions = DrawOptions()) override;
 
+  virtual void DrawSurfaceDescriptor(
+      const layers::SurfaceDescriptor& aDesc,
+      const RefPtr<layers::Image>& aImageOfSurfaceDescriptor, const Rect& aDest,
+      const Rect& aSource,
+      const DrawSurfaceOptions& aSurfOptions = DrawSurfaceOptions(),
+      const DrawOptions& aOptions = DrawOptions()) override;
+
   virtual void DrawDependentSurface(uint64_t aId, const Rect& aDest) override;
 
   virtual void DrawFilter(FilterNode* aNode, const Rect& aSourceRect,
@@ -369,6 +376,10 @@ class DrawTargetRecording : public DrawTarget {
 
   void MarkClean() { mIsDirty = false; }
 
+  void SetOptimizeTransform(bool aOptimizeTransform) {
+    mOptimizeTransform = aOptimizeTransform;
+  }
+
  private:
   /**
    * Used for creating a DrawTargetRecording for a CreateSimilarDrawTarget call.
@@ -380,6 +391,35 @@ class DrawTargetRecording : public DrawTarget {
   DrawTargetRecording(const DrawTargetRecording* aDT, IntRect aRect,
                       SurfaceFormat aFormat);
 
+  void RecordTransform(const Matrix& aTransform) const;
+
+  void FlushTransform() const {
+    if (mTransformDirty) {
+      if (!mRecordedTransform.ExactlyEquals(mTransform)) {
+        RecordTransform(mTransform);
+      }
+      mTransformDirty = false;
+    }
+  }
+
+  void RecordEvent(const RecordedEvent& aEvent) const {
+    FlushTransform();
+    mRecorder->RecordEvent(aEvent);
+  }
+
+  void RecordEventSelf(const RecordedEvent& aEvent) const {
+    FlushTransform();
+    mRecorder->RecordEvent(this, aEvent);
+  }
+
+  void RecordEventSkipFlushTransform(const RecordedEvent& aEvent) const {
+    mRecorder->RecordEvent(aEvent);
+  }
+
+  void RecordEventSelfSkipFlushTransform(const RecordedEvent& aEvent) const {
+    mRecorder->RecordEvent(this, aEvent);
+  }
+
   Path* GetPathForPathRecording(const Path* aPath) const;
   already_AddRefed<PathRecording> EnsurePathStored(const Path* aPath);
   void EnsurePatternDependenciesStored(const Pattern& aPattern);
@@ -403,6 +443,10 @@ class DrawTargetRecording : public DrawTarget {
   std::vector<PushedLayer> mPushedLayers;
 
   bool mIsDirty = false;
+  bool mOptimizeTransform = false;
+
+  // Last transform that was used in the recording.
+  mutable Matrix mRecordedTransform;
 };
 
 }  // namespace gfx
diff --git a/gfx/2d/RecordedEvent.cpp b/gfx/2d/RecordedEvent.cpp
index 265ee1904b..c89bd99d4c 100644
--- a/gfx/2d/RecordedEvent.cpp
+++ b/gfx/2d/RecordedEvent.cpp
@@ -70,6 +70,8 @@ std::string RecordedEvent::GetEventName(EventType aType) {
       return "Stroke";
     case DRAWSURFACE:
       return "DrawSurface";
+    case DRAWSURFACEDESCRIPTOR:
+      return "DrawSurfaceDescriptor";
     case DRAWDEPENDENTSURFACE:
       return "DrawDependentSurface";
     case DRAWSURFACEWITHSHADOW:
diff --git a/gfx/2d/RecordedEvent.h b/gfx/2d/RecordedEvent.h
index a8801ddd1f..835460ff25 100644
--- a/gfx/2d/RecordedEvent.h
+++ b/gfx/2d/RecordedEvent.h
@@ -102,6 +102,12 @@ class Translator {
   virtual already_AddRefed<SourceSurface> LookupExternalSurface(uint64_t aKey) {
     return nullptr;
   }
+  virtual already_AddRefed<SourceSurface>
+  LookupSourceSurfaceFromSurfaceDescriptor(
+      const layers::SurfaceDescriptor& aDesc) {
+    MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+    return nullptr;
+  }
   void DrawDependentSurface(uint64_t aKey, const Rect& aRect);
   virtual void AddDrawTarget(ReferencePtr aRefPtr, DrawTarget* aDT) = 0;
   virtual void RemoveDrawTarget(ReferencePtr aRefPtr) = 0;
@@ -389,6 +395,7 @@ class RecordedEvent {
     MASK,
     STROKE,
     DRAWSURFACE,
+    DRAWSURFACEDESCRIPTOR,
     DRAWDEPENDENTSURFACE,
     DRAWSURFACEWITHSHADOW,
     DRAWSHADOW,
diff --git a/gfx/2d/RecordedEventImpl.h b/gfx/2d/RecordedEventImpl.h
index a880536bf8..1d8a275c63 100644
--- a/gfx/2d/RecordedEventImpl.h
+++ b/gfx/2d/RecordedEventImpl.h
@@ -17,6 +17,8 @@
 #include "ScaledFontBase.h"
 #include "SFNTData.h"
 
+#include "mozilla/layers/LayersSurfaces.h"
+
 namespace mozilla {
 namespace gfx {
 
@@ -852,6 +854,41 @@ class RecordedDrawSurface : public RecordedEventDerived<RecordedDrawSurface> {
   DrawOptions mOptions;
 };
 
+class RecordedDrawSurfaceDescriptor
+    : public RecordedEventDerived<RecordedDrawSurfaceDescriptor> {
+ public:
+  RecordedDrawSurfaceDescriptor(const layers::SurfaceDescriptor& aDesc,
+                                const Rect& aDest, const Rect& aSource,
+                                const DrawSurfaceOptions& aDSOptions,
+                                const DrawOptions& aOptions)
+      : RecordedEventDerived(DRAWSURFACEDESCRIPTOR),
+        mDesc(aDesc),
+        mDest(aDest),
+        mSource(aSource),
+        mDSOptions(aDSOptions),
+        mOptions(aOptions) {}
+
+  bool PlayEvent(Translator* aTranslator) const override;
+
+  template <class S>
+  void Record(S& aStream) const;
+  void OutputSimpleEventInfo(std::stringstream& aStringStream) const override;
+
+  std::string GetName() const override { return "DrawSurfaceDescriptor"; }
+
+ private:
+  friend class RecordedEvent;
+
+  template <class S>
+  MOZ_IMPLICIT RecordedDrawSurfaceDescriptor(S& aStream);
+
+  layers::SurfaceDescriptor mDesc;
+  Rect mDest;
+  Rect mSource;
+  DrawSurfaceOptions mDSOptions;
+  DrawOptions mOptions;
+};
+
 class RecordedDrawDependentSurface
     : public RecordedEventDerived<RecordedDrawDependentSurface> {
  public:
@@ -3141,6 +3178,52 @@ inline void RecordedDrawSurface::OutputSimpleEventInfo(
   aStringStream << "DrawSurface (" << mRefSource << ")";
 }
 
+inline bool RecordedDrawSurfaceDescriptor::PlayEvent(
+    Translator* aTranslator) const {
+  DrawTarget* dt = aTranslator->GetCurrentDrawTarget();
+  if (!dt) {
+    return false;
+  }
+
+  RefPtr<SourceSurface> surface =
+      aTranslator->LookupSourceSurfaceFromSurfaceDescriptor(mDesc);
+  if (!surface) {
+    return false;
+  }
+
+  RefPtr<SourceSurface> opt = dt->OptimizeSourceSurface(surface);
+  if (opt) {
+    surface = opt;
+  }
+
+  dt->DrawSurface(surface, mDest, mSource, mDSOptions, mOptions);
+  return true;
+}
+
+template <class S>
+void
RecordedDrawSurfaceDescriptor::Record(S& aStream) const { + WriteElement(aStream, mDesc); + WriteElement(aStream, mDest); + WriteElement(aStream, mSource); + WriteElement(aStream, mDSOptions); + WriteElement(aStream, mOptions); +} + +template <class S> +RecordedDrawSurfaceDescriptor::RecordedDrawSurfaceDescriptor(S& aStream) + : RecordedEventDerived(DRAWSURFACEDESCRIPTOR) { + ReadElement(aStream, mDesc); + ReadElement(aStream, mDest); + ReadElement(aStream, mSource); + ReadDrawSurfaceOptions(aStream, mDSOptions); + ReadDrawOptions(aStream, mOptions); +} + +inline void RecordedDrawSurfaceDescriptor::OutputSimpleEventInfo( + std::stringstream& aStringStream) const { + aStringStream << "DrawSurfaceDescriptor (" << mDesc.type() << ")"; +} + inline bool RecordedDrawDependentSurface::PlayEvent( Translator* aTranslator) const { aTranslator->DrawDependentSurface(mId, mDest); @@ -4379,6 +4462,7 @@ inline void RecordedDestination::OutputSimpleEventInfo( f(MASK, RecordedMask); \ f(STROKE, RecordedStroke); \ f(DRAWSURFACE, RecordedDrawSurface); \ + f(DRAWSURFACEDESCRIPTOR, RecordedDrawSurfaceDescriptor); \ f(DRAWDEPENDENTSURFACE, RecordedDrawDependentSurface); \ f(DRAWSURFACEWITHSHADOW, RecordedDrawSurfaceWithShadow); \ f(DRAWSHADOW, RecordedDrawShadow); \ diff --git a/gfx/2d/SkConvolver.cpp b/gfx/2d/SkConvolver.cpp index befe8da30b..b89a486d48 100644 --- a/gfx/2d/SkConvolver.cpp +++ b/gfx/2d/SkConvolver.cpp @@ -5,7 +5,6 @@ // found in the gfx/skia/LICENSE file. #include "SkConvolver.h" -#include "mozilla/Vector.h" #ifdef USE_SSE2 # include "mozilla/SSE.h" @@ -235,9 +234,11 @@ class CircularRowBuffer { : fRowByteWidth(destRowPixelWidth * 4), fNumRows(maxYFilterSize), fNextRow(0), - fNextRowCoordinate(firstInputRow) { - fBuffer.resize(fRowByteWidth * maxYFilterSize); - fRowAddresses.resize(fNumRows); + fNextRowCoordinate(firstInputRow) {} + + bool AllocBuffer() { + return fBuffer.resize(fRowByteWidth * fNumRows) && + fRowAddresses.resize(fNumRows); } // Moves to the next row in the buffer, returning a pointer to the beginning @@ -288,7 +289,7 @@ class CircularRowBuffer { private: // The buffer storing the rows. They are packed, each one fRowByteWidth. - std::vector<unsigned char> fBuffer; + mozilla::Vector<unsigned char> fBuffer; // Number of bytes per row in the |buffer|. int fRowByteWidth; @@ -305,14 +306,14 @@ class CircularRowBuffer { int fNextRowCoordinate; // Buffer used by GetRowAddresses(). - std::vector<unsigned char*> fRowAddresses; + mozilla::Vector<unsigned char*> fRowAddresses; }; SkConvolutionFilter1D::SkConvolutionFilter1D() : fMaxFilter(0) {} SkConvolutionFilter1D::~SkConvolutionFilter1D() = default; -void SkConvolutionFilter1D::AddFilter(int filterOffset, +bool SkConvolutionFilter1D::AddFilter(int filterOffset, const ConvolutionFixed* filterValues, int filterLength) { // It is common for leading/trailing filter values to be zeros. In such @@ -336,8 +337,9 @@ void SkConvolutionFilter1D::AddFilter(int filterOffset, filterLength = lastNonZero + 1 - firstNonZero; MOZ_ASSERT(filterLength > 0); - fFilterValues.insert(fFilterValues.end(), &filterValues[firstNonZero], - &filterValues[lastNonZero + 1]); + if (!fFilterValues.append(&filterValues[firstNonZero], filterLength)) { + return false; + } } else { // Here all the factors were zeroes. 
filterLength = 0; @@ -345,11 +347,17 @@ void SkConvolutionFilter1D::AddFilter(int filterOffset, FilterInstance instance = { // We pushed filterLength elements onto fFilterValues - int(fFilterValues.size()) - filterLength, filterOffset, filterLength, + int(fFilterValues.length()) - filterLength, filterOffset, filterLength, filterSize}; - fFilters.push_back(instance); + if (!fFilters.append(instance)) { + if (filterLength > 0) { + fFilterValues.shrinkBy(filterLength); + } + return false; + } fMaxFilter = std::max(fMaxFilter, filterLength); + return true; } bool SkConvolutionFilter1D::ComputeFilterValues( @@ -383,10 +391,13 @@ bool SkConvolutionFilter1D::ComputeFilterValues( int32_t filterValueCount = int32_t(ceilf(aDstSize * srcSupport * 2)); if (aDstSize > maxToPassToReserveAdditional || filterValueCount < 0 || - filterValueCount > maxToPassToReserveAdditional) { + filterValueCount > maxToPassToReserveAdditional || + !reserveAdditional(aDstSize, filterValueCount)) { return false; } - reserveAdditional(aDstSize, filterValueCount); + size_t oldFiltersLength = fFilters.length(); + size_t oldFilterValuesLength = fFilterValues.length(); + int oldMaxFilter = fMaxFilter; for (int32_t destI = 0; destI < aDstSize; destI++) { // This is the pixel in the source directly under the pixel in the dest. // Note that we base computations on the "center" of the pixels. To see @@ -443,7 +454,12 @@ bool SkConvolutionFilter1D::ComputeFilterValues( ConvolutionFixed leftovers = ToFixed(1) - fixedSum; fixedFilterValues[filterCount / 2] += leftovers; - AddFilter(int32_t(srcBegin), fixedFilterValues.begin(), filterCount); + if (!AddFilter(int32_t(srcBegin), fixedFilterValues.begin(), filterCount)) { + fFilters.shrinkTo(oldFiltersLength); + fFilterValues.shrinkTo(oldFilterValuesLength); + fMaxFilter = oldMaxFilter; + return false; + } } return maxFilter() > 0 && numValues() == aDstSize; @@ -515,6 +531,9 @@ bool BGRAConvolve2D(const unsigned char* sourceData, int sourceByteRowStride, } CircularRowBuffer rowBuffer(rowBufferWidth, rowBufferHeight, filterOffset); + if (!rowBuffer.AllocBuffer()) { + return false; + } // Loop over every possible output row, processing just enough horizontal // convolutions to run each subsequent vertical convolution. diff --git a/gfx/2d/SkConvolver.h b/gfx/2d/SkConvolver.h index 5ea8ab9b5d..e21b2be024 100644 --- a/gfx/2d/SkConvolver.h +++ b/gfx/2d/SkConvolver.h @@ -10,7 +10,7 @@ #include "mozilla/Assertions.h" #include <cfloat> #include <cmath> -#include <vector> +#include "mozilla/Vector.h" namespace skia { @@ -81,11 +81,11 @@ class SkConvolutionFilter1D { // Returns the number of filters in this filter. This is the dimension of the // output image. - int numValues() const { return static_cast<int>(fFilters.size()); } + int numValues() const { return static_cast<int>(fFilters.length()); } - void reserveAdditional(int filterCount, int filterValueCount) { - fFilters.reserve(fFilters.size() + filterCount); - fFilterValues.reserve(fFilterValues.size() + filterValueCount); + bool reserveAdditional(int filterCount, int filterValueCount) { + return fFilters.reserve(fFilters.length() + filterCount) && + fFilterValues.reserve(fFilterValues.length() + filterValueCount); } // Appends the given list of scaling values for generating a given output @@ -98,7 +98,7 @@ class SkConvolutionFilter1D { // brighness of the image. // // The filterLength must be > 0. 
- void AddFilter(int filterOffset, const ConvolutionFixed* filterValues, + bool AddFilter(int filterOffset, const ConvolutionFixed* filterValues, int filterLength); // Retrieves a filter for the given |valueOffset|, a position in the output @@ -139,12 +139,12 @@ class SkConvolutionFilter1D { }; // Stores the information for each filter added to this class. - std::vector<FilterInstance> fFilters; + mozilla::Vector<FilterInstance> fFilters; // We store all the filter values in this flat list, indexed by // |FilterInstance.data_location| to avoid the mallocs required for storing // each one separately. - std::vector<ConvolutionFixed> fFilterValues; + mozilla::Vector<ConvolutionFixed> fFilterValues; // The maximum size of any filter we've added. int fMaxFilter; diff --git a/gfx/cairo/cairo/src/cairo-cff-subset.c b/gfx/cairo/cairo/src/cairo-cff-subset.c index 32a22ee3e0..39e6693a2b 100644 --- a/gfx/cairo/cairo/src/cairo-cff-subset.c +++ b/gfx/cairo/cairo/src/cairo-cff-subset.c @@ -3154,7 +3154,7 @@ _cairo_cff_font_fallback_create (cairo_scaled_font_subset_t *scaled_font_subset cairo_status_t status; cairo_cff_font_t *font; - font = _cairo_malloc (sizeof (cairo_cff_font_t)); + font = calloc (1, sizeof (cairo_cff_font_t)); if (unlikely (font == NULL)) return _cairo_error (CAIRO_STATUS_NO_MEMORY); diff --git a/gfx/cairo/cff-font-creation.patch b/gfx/cairo/cff-font-creation.patch new file mode 100644 index 0000000000..1ee5398b0c --- /dev/null +++ b/gfx/cairo/cff-font-creation.patch @@ -0,0 +1,12 @@ +diff --git a/gfx/cairo/cairo/src/cairo-cff-subset.c b/gfx/cairo/cairo/src/cairo-cff-subset.c +--- a/gfx/cairo/cairo/src/cairo-cff-subset.c ++++ b/gfx/cairo/cairo/src/cairo-cff-subset.c +@@ -3154,7 +3154,7 @@ static cairo_int_status_t + cairo_status_t status; + cairo_cff_font_t *font; + +- font = _cairo_malloc (sizeof (cairo_cff_font_t)); ++ font = calloc (1, sizeof (cairo_cff_font_t)); + if (unlikely (font == NULL)) + return _cairo_error (CAIRO_STATUS_NO_MEMORY); + diff --git a/gfx/config/gfxVars.h b/gfx/config/gfxVars.h index d74255b5db..c23db91cec 100644 --- a/gfx/config/gfxVars.h +++ b/gfx/config/gfxVars.h @@ -58,7 +58,7 @@ class gfxVarReceiver; _(WebRenderBatchedUploadThreshold, int32_t, 512 * 512) \ _(UseSoftwareWebRender, bool, false) \ _(AllowSoftwareWebRenderD3D11, bool, false) \ - _(ScreenDepth, int32_t, 0) \ + _(PrimaryScreenDepth, int32_t, 0) \ _(GREDirectory, nsString, nsString()) \ _(ProfDirectory, nsString, nsString()) \ _(AllowD3D11KeyedMutex, bool, false) \ @@ -101,7 +101,8 @@ class gfxVarReceiver; _(AllowSoftwareWebRenderOGL, bool, false) \ _(WebglUseHardware, bool, true) \ _(WebRenderOverlayVpAutoHDR, bool, false) \ - _(WebRenderOverlayVpSuperResolution, bool, false) + _(WebRenderOverlayVpSuperResolution, bool, false) \ + _(AllowWebGPUPresentWithoutReadback, bool, false) /* Add new entries above this line. */ diff --git a/gfx/gl/GLContextProviderEGL.cpp b/gfx/gl/GLContextProviderEGL.cpp index cb47e285a5..ffd32202e5 100644 --- a/gfx/gl/GLContextProviderEGL.cpp +++ b/gfx/gl/GLContextProviderEGL.cpp @@ -336,23 +336,29 @@ EGLSurface GLContextEGL::CreateEGLSurfaceForCompositorWidget( } MOZ_ASSERT(aCompositorWidget); -#ifdef MOZ_WAYLAND - // RenderCompositorEGL does not like EGL_NO_SURFACE as it fallbacks - // to SW rendering or claims itself as paused. - // In case we're missing valid native window because aCompositorWidget hidden, - // just create a fallback EGLSurface. - // Actual EGLSurface will be created by widget code later when - // aCompositorWidget becomes visible. 
- if (widget::GdkIsWaylandDisplay() && aCompositorWidget->IsHidden()) { - mozilla::gfx::IntSize pbSize(16, 16); - return CreateWaylandOffscreenSurface(*egl, aConfig, pbSize); - } -#endif EGLNativeWindowType window = GET_NATIVE_WINDOW_FROM_COMPOSITOR_WIDGET(aCompositorWidget); if (!window) { +#ifdef MOZ_WIDGET_GTK + // RenderCompositorEGL does not like EGL_NO_SURFACE as it fallbacks + // to SW rendering or claims itself as paused. + // In case we're missing valid native window because aCompositorWidget + // hidden, just create a fallback EGLSurface. Actual EGLSurface will be + // created by widget code later when aCompositorWidget becomes visible. + mozilla::gfx::IntSize pbSize(16, 16); +# ifdef MOZ_WAYLAND + if (GdkIsWaylandDisplay()) { + return CreateWaylandOffscreenSurface(*egl, aConfig, pbSize); + } else +# endif + { + return CreatePBufferSurfaceTryingPowerOfTwo(*egl, aConfig, LOCAL_EGL_NONE, + pbSize); + } +#else gfxCriticalNote << "window is null"; return EGL_NO_SURFACE; +#endif } return mozilla::gl::CreateSurfaceFromNativeWindow(*egl, window, aConfig); @@ -486,16 +492,6 @@ bool GLContextEGL::RenewSurface(CompositorWidget* aWidget) { EGLNativeWindowType nativeWindow = GET_NATIVE_WINDOW_FROM_COMPOSITOR_WIDGET(aWidget); -#ifdef MOZ_WAYLAND - // In case we're missing native window on Wayland CompositorWidget is hidden. - // Don't create a fallback EGL surface but fails here. - // We need to repeat RenewSurface() when native window is available - // (CompositorWidget becomes visible). - if (GdkIsWaylandDisplay()) { - NS_WARNING("Failed to get native window"); - return false; - } -#endif if (nativeWindow) { mSurface = mozilla::gl::CreateSurfaceFromNativeWindow(*mEgl, nativeWindow, mSurfaceConfig); @@ -974,7 +970,7 @@ bool CreateConfig(EglDisplay& aEgl, EGLConfig* aConfig, int32_t aDepth, static bool CreateConfigScreen(EglDisplay& egl, EGLConfig* const aConfig, const bool aEnableDepthBuffer, const bool aUseGles) { - int32_t depth = gfxVars::ScreenDepth(); + int32_t depth = gfxVars::PrimaryScreenDepth(); if (CreateConfig(egl, aConfig, depth, aEnableDepthBuffer, aUseGles)) { return true; } diff --git a/gfx/harfbuzz/NEWS b/gfx/harfbuzz/NEWS index 386a106736..2d6a4a7d6b 100644 --- a/gfx/harfbuzz/NEWS +++ b/gfx/harfbuzz/NEWS @@ -1,3 +1,25 @@ +Overview of changes leading to 8.3.1 +Saturday, March 16, 2024 +==================================== +- hb_blob_create_from_file_or_fail() on Windows will now try to interpret the + file name as UTF-8 first, and as system code page if it is not valid UTF-8. +- Fix hb_style_get_value() in fonts with “STAT” table. +- Properly handle negative offsets in CFF table. +- Update IANA Language Subtag Registry to 2024-03-07. +- Subsetter now supports subsetting “BASE” table. +- Subsetter will update “hhea” font metrics in sync with “OS/2” ones. +- “--variations” option of “hb-subset” now supports leaving out values that + should be unchanged, e.g. “wght=:500:” will change the default and keep max + and min unchanged. It also supports “*=drop” to to pin all axes to default + location. +- Fix hb_ot_math_get_glyph_kerning() to match updated “MATH” table spec. +- Support legacy MacRoman encoding in “cmap” table. +- Various build fixes. +- Various subsetting and instancing fixes. 
+ +- New API: +hb_subset_input_pin_all_axes_to_default() + Overview of changes leading to 8.3.0 Saturday, November 11, 2023 ==================================== @@ -191,7 +213,7 @@ Saturday, February 11, 2023 ==================================== - New hb-paint API that is designed mainly to paint “COLRv1” glyphs, but can be also used as a unified API to paint any of the glyph representations - supported by HarfBuzz (B/W outlines, color layers, or color bitmaps). + supported by HarfBuzz (B/W outlines, color layers, or color bitmaps). (Behdad Esfahbod, Matthias Clasen) - New hb-cairo API for integrating with cairo graphics library. This is provided as a separate harfbuzz-cairo library. (Behdad Esfahbod, Matthias Clasen) @@ -202,7 +224,7 @@ Saturday, February 11, 2023 https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1-cubicOutlines.md for spec. (Behdad Esfahbod) - Various subsetter improvements. (Garret Rieger, Qunxin Liu, Behdad Esfahbod) -- Various documentation improvements. +- Various documentation improvements. (Behdad Esfahbod, Matthias Clasen, Khaled Hosny) - Significantly reduced memory use during shaping. (Behdad Esfahbod) - Greatly reduced memory use during subsetting “CFF” table. (Behdad Esfahbod) @@ -946,7 +968,7 @@ Tuesday, March 16, 2021 Previously these were shaped using the generalized Arabic shaper. (David Corbett) - Fix regression in shaping of U+0B55 ORIYA SIGN OVERLINE. (David Corbett) - Update language tags. (David Corbett) -- Variations: reduce error: do not round each interpolated delta. (Just van Rossum) +- Variations: reduce error: do not round each interpolated delta. (Just van Rossum) - Documentation improvements. (Khaled Hosny, Nathan Willis) - Subsetter improvements: subsets most, if not all, lookup types now. (Garret Rieger, Qunxin Liu) - Fuzzer-found fixes and other improvements when memory failures happen. 
(Behdad) diff --git a/gfx/harfbuzz/README.md b/gfx/harfbuzz/README.md index 33165091a8..d11c489f10 100644 --- a/gfx/harfbuzz/README.md +++ b/gfx/harfbuzz/README.md @@ -2,7 +2,7 @@ [![CircleCI Build Status](https://circleci.com/gh/harfbuzz/harfbuzz/tree/main.svg?style=svg)](https://circleci.com/gh/harfbuzz/harfbuzz/tree/main) [![OSS-Fuzz Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/harfbuzz.svg)](https://oss-fuzz-build-logs.storage.googleapis.com/index.html) [![Coverity Scan Build Status](https://scan.coverity.com/projects/15166/badge.svg)](https://scan.coverity.com/projects/harfbuzz) -[![Codacy Badge](https://app.codacy.com/project/badge/Grade/89c872f5ce1c42af802602bfcd15d90a)](https://www.codacy.com/gh/harfbuzz/harfbuzz/dashboard?utm_source=github.com&utm_medium=referral&utm_content=harfbuzz/harfbuzz&utm_campaign=Badge_Grade) +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/89c872f5ce1c42af802602bfcd15d90a)](https://app.codacy.com/gh/harfbuzz/harfbuzz/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade) [![Codecov Code Coverage](https://codecov.io/gh/harfbuzz/harfbuzz/branch/main/graph/badge.svg)](https://codecov.io/gh/harfbuzz/harfbuzz) [![Packaging status](https://repology.org/badge/tiny-repos/harfbuzz.svg)](https://repology.org/project/harfbuzz/versions) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/harfbuzz/harfbuzz/badge)](https://securityscorecards.dev/viewer/?uri=github.com/harfbuzz/harfbuzz) diff --git a/gfx/harfbuzz/configure.ac b/gfx/harfbuzz/configure.ac index 0d5b3d9675..d7ac9333e2 100644 --- a/gfx/harfbuzz/configure.ac +++ b/gfx/harfbuzz/configure.ac @@ -1,6 +1,6 @@ AC_PREREQ([2.64]) AC_INIT([HarfBuzz], - [8.3.0], + [8.3.1], [https://github.com/harfbuzz/harfbuzz/issues/new], [harfbuzz], [http://harfbuzz.org/]) diff --git a/gfx/harfbuzz/moz.yaml b/gfx/harfbuzz/moz.yaml index c94f303221..182f7f4c28 100644 --- a/gfx/harfbuzz/moz.yaml +++ b/gfx/harfbuzz/moz.yaml @@ -20,11 +20,11 @@ origin: # Human-readable identifier for this version/release # Generally "version NNN", "tag SSS", "bookmark SSS" - release: 8.3.0 (2023-11-11T16:07:57+02:00). + release: 8.3.1 (2024-03-17T07:50:59+02:00). 
# Revision to pull in # Must be a long or short commit SHA (long preferred) - revision: 8.3.0 + revision: 8.3.1 # The package's license, where possible using the mnemonic from # https://spdx.org/licenses/ diff --git a/gfx/harfbuzz/src/Makefile.am b/gfx/harfbuzz/src/Makefile.am index ff6a6d6f37..0f708a446f 100644 --- a/gfx/harfbuzz/src/Makefile.am +++ b/gfx/harfbuzz/src/Makefile.am @@ -174,7 +174,7 @@ libharfbuzz_la_CPPFLAGS = $(HBCFLAGS) $(CODE_COVERAGE_CFLAGS) libharfbuzz_la_LDFLAGS = $(base_link_flags) $(export_symbols) $(CODE_COVERAGE_LDFLAGS) libharfbuzz_la_LIBADD = $(HBLIBS) EXTRA_libharfbuzz_la_DEPENDENCIES = $(harfbuzz_def_dependency) -pkginclude_HEADERS = $(HBHEADERS) +pkginclude_HEADERS = $(HBHEADERS) hb-features.h nodist_pkginclude_HEADERS = pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = harfbuzz.pc @@ -533,11 +533,11 @@ test_instancer_solver_SOURCES = test-subset-instancer-solver.cc hb-subset-instan test_instancer_solver_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS) test_instancer_solver_LDADD = $(COMPILED_TESTS_LDADD) -test_tuple_varstore_SOURCES = test-tuple-varstore.cc hb-subset-instancer-solver.cc hb-static.cc +test_tuple_varstore_SOURCES = test-tuple-varstore.cc hb-subset-instancer-solver.cc hb-subset-instancer-iup.cc hb-static.cc test_tuple_varstore_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS) test_tuple_varstore_LDADD = $(COMPILED_TESTS_LDADD) -test_item_varstore_SOURCES = test-item-varstore.cc hb-subset-instancer-solver.cc hb-static.cc +test_item_varstore_SOURCES = test-item-varstore.cc hb-subset-instancer-solver.cc hb-subset-instancer-iup.cc hb-static.cc test_item_varstore_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS) test_item_varstore_LDADD = $(COMPILED_TESTS_LDADD) diff --git a/gfx/harfbuzz/src/Makefile.sources b/gfx/harfbuzz/src/Makefile.sources index fbbff5325e..33b5ef8083 100644 --- a/gfx/harfbuzz/src/Makefile.sources +++ b/gfx/harfbuzz/src/Makefile.sources @@ -368,6 +368,8 @@ HB_SUBSET_sources = \ hb-subset-cff2.cc \ hb-subset-input.cc \ hb-subset-input.hh \ + hb-subset-instancer-iup.hh \ + hb-subset-instancer-iup.cc \ hb-subset-instancer-solver.hh \ hb-subset-instancer-solver.cc \ hb-subset-accelerator.hh \ diff --git a/gfx/harfbuzz/src/OT/Color/COLR/COLR.hh b/gfx/harfbuzz/src/OT/Color/COLR/COLR.hh index b632a1d9eb..623775a771 100644 --- a/gfx/harfbuzz/src/OT/Color/COLR/COLR.hh +++ b/gfx/harfbuzz/src/OT/Color/COLR/COLR.hh @@ -68,7 +68,7 @@ public: hb_font_t *font; unsigned int palette_index; hb_color_t foreground; - VarStoreInstancer &instancer; + ItemVarStoreInstancer &instancer; hb_map_t current_glyphs; hb_map_t current_layers; int depth_left = HB_MAX_NESTING_LEVEL; @@ -80,7 +80,7 @@ public: hb_font_t *font_, unsigned int palette_, hb_color_t foreground_, - VarStoreInstancer &instancer_) : + ItemVarStoreInstancer &instancer_) : base (base_), funcs (funcs_), data (data_), @@ -245,7 +245,7 @@ struct Variable { value.closurev1 (c); } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); if (!value.subset (c, instancer, varIdxBase)) return_trace (false); @@ -270,7 +270,7 @@ struct Variable void get_color_stop (hb_paint_context_t *c, hb_color_stop_t *stop, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { value.get_color_stop (c, stop, varIdxBase, instancer); } @@ -305,7 +305,7 @@ struct NoVariable { value.closurev1 (c); } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) 
const { TRACE_SUBSET (this); return_trace (value.subset (c, instancer, varIdxBase)); @@ -325,7 +325,7 @@ struct NoVariable void get_color_stop (hb_paint_context_t *c, hb_color_stop_t *stop, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { value.get_color_stop (c, stop, VarIdx::NO_VARIATION, instancer); } @@ -348,7 +348,7 @@ struct ColorStop { c->add_palette_index (paletteIndex); } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -374,7 +374,7 @@ struct ColorStop void get_color_stop (hb_paint_context_t *c, hb_color_stop_t *out, uint32_t varIdx, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { out->offset = stopOffset.to_float(instancer (varIdx, 0)); out->color = c->get_color (paletteIndex, @@ -410,7 +410,7 @@ struct ColorLine } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->start_embed (this); @@ -439,7 +439,7 @@ struct ColorLine unsigned int start, unsigned int *count, hb_color_stop_t *color_stops, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { unsigned int len = stops.len; @@ -543,7 +543,7 @@ struct Affine2x3 } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -588,7 +588,7 @@ struct PaintColrLayers void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer HB_UNUSED) const + const ItemVarStoreInstancer &instancer HB_UNUSED) const { TRACE_SUBSET (this); auto *out = c->serializer->embed (this); @@ -620,7 +620,7 @@ struct PaintSolid { c->add_palette_index (paletteIndex); } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -669,7 +669,7 @@ struct PaintLinearGradient { (this+colorLine).closurev1 (c); } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -736,7 +736,7 @@ struct PaintRadialGradient { (this+colorLine).closurev1 (c); } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -803,7 +803,7 @@ struct PaintSweepGradient { (this+colorLine).closurev1 (c); } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -863,7 +863,7 @@ struct PaintGlyph void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->embed (this); @@ -906,7 +906,7 @@ struct PaintColrGlyph void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer HB_UNUSED) const + const ItemVarStoreInstancer &instancer HB_UNUSED) const { TRACE_SUBSET (this); auto *out = c->serializer->embed (this); @@ -936,7 +936,7 @@ struct PaintTransform HB_INTERNAL void 
closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->embed (this); @@ -975,7 +975,7 @@ struct PaintTranslate HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1024,7 +1024,7 @@ struct PaintScale HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1073,7 +1073,7 @@ struct PaintScaleAroundCenter HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1132,7 +1132,7 @@ struct PaintScaleUniform HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1176,7 +1176,7 @@ struct PaintScaleUniformAroundCenter HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1232,7 +1232,7 @@ struct PaintRotate HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1276,7 +1276,7 @@ struct PaintRotateAroundCenter HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1332,7 +1332,7 @@ struct PaintSkew HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1381,7 +1381,7 @@ struct PaintSkewAroundCenter HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1440,7 +1440,7 @@ struct PaintComposite void closurev1 (hb_colrv1_closure_context_t* c) const; bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->embed (this); @@ -1491,7 +1491,7 @@ struct ClipBoxFormat1 return_trace (c->check_struct (this)); } - void get_clip_box (ClipBoxData &clip_box, const VarStoreInstancer &instancer HB_UNUSED) const + void get_clip_box (ClipBoxData &clip_box, const ItemVarStoreInstancer &instancer HB_UNUSED) const { clip_box.xMin = xMin; clip_box.yMin = yMin; @@ -1500,7 +1500,7 @@ struct ClipBoxFormat1 } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, 
uint32_t varIdxBase) const { TRACE_SUBSET (this); @@ -1533,7 +1533,7 @@ struct ClipBoxFormat1 struct ClipBoxFormat2 : Variable<ClipBoxFormat1> { - void get_clip_box (ClipBoxData &clip_box, const VarStoreInstancer &instancer) const + void get_clip_box (ClipBoxData &clip_box, const ItemVarStoreInstancer &instancer) const { value.get_clip_box(clip_box, instancer); if (instancer) @@ -1549,7 +1549,7 @@ struct ClipBoxFormat2 : Variable<ClipBoxFormat1> struct ClipBox { bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); switch (u.format) { @@ -1572,7 +1572,7 @@ struct ClipBox } bool get_extents (hb_glyph_extents_t *extents, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { ClipBoxData clip_box; switch (u.format) { @@ -1608,7 +1608,7 @@ struct ClipRecord bool subset (hb_subset_context_t *c, const void *base, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->embed (*this); @@ -1625,7 +1625,7 @@ struct ClipRecord bool get_extents (hb_glyph_extents_t *extents, const void *base, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { return (base+clipBox).get_extents (extents, instancer); } @@ -1642,7 +1642,7 @@ DECLARE_NULL_NAMESPACE_BYTES (OT, ClipRecord); struct ClipList { unsigned serialize_clip_records (hb_subset_context_t *c, - const VarStoreInstancer &instancer, + const ItemVarStoreInstancer &instancer, const hb_set_t& gids, const hb_map_t& gid_offset_map) const { @@ -1695,7 +1695,7 @@ struct ClipList } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->start_embed (*this); @@ -1735,7 +1735,7 @@ struct ClipList bool get_extents (hb_codepoint_t gid, hb_glyph_extents_t *extents, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { auto *rec = clips.as_array ().bsearch (gid); if (rec) @@ -1855,7 +1855,7 @@ struct BaseGlyphPaintRecord bool serialize (hb_serialize_context_t *s, const hb_map_t* glyph_map, const void* src_base, hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SERIALIZE (this); auto *out = s->embed (this); @@ -1884,7 +1884,7 @@ struct BaseGlyphPaintRecord struct BaseGlyphList : SortedArray32Of<BaseGlyphPaintRecord> { bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->start_embed (this); @@ -1916,7 +1916,7 @@ struct LayerList : Array32OfOffset32To<Paint> { return this+(*this)[i]; } bool subset (hb_subset_context_t *c, - const VarStoreInstancer &instancer) const + const ItemVarStoreInstancer &instancer) const { TRACE_SUBSET (this); auto *out = c->serializer->start_embed (this); @@ -2206,7 +2206,7 @@ struct COLR auto snap = c->serializer->snapshot (); if (!c->serializer->allocate_size<void> (5 * HBUINT32::static_size)) return_trace (false); - VarStoreInstancer instancer (varStore ? &(this+varStore) : nullptr, + ItemVarStoreInstancer instancer (varStore ? &(this+varStore) : nullptr, varIdxMap ? 
&(this+varIdxMap) : nullptr, c->plan->normalized_coords.as_array ()); @@ -2250,7 +2250,7 @@ struct COLR if (version != 1) return false; - VarStoreInstancer instancer (&(this+varStore), + ItemVarStoreInstancer instancer (&(this+varStore), &(this+varIdxMap), hb_array (font->coords, font->num_coords)); @@ -2301,7 +2301,7 @@ struct COLR bool get_clip (hb_codepoint_t glyph, hb_glyph_extents_t *extents, - const VarStoreInstancer instancer) const + const ItemVarStoreInstancer instancer) const { return (this+clipList).get_extents (glyph, extents, @@ -2312,7 +2312,7 @@ struct COLR bool paint_glyph (hb_font_t *font, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data, unsigned int palette_index, hb_color_t foreground, bool clip = true) const { - VarStoreInstancer instancer (&(this+varStore), + ItemVarStoreInstancer instancer (&(this+varStore), &(this+varIdxMap), hb_array (font->coords, font->num_coords)); hb_paint_context_t c (this, funcs, data, font, palette_index, foreground, instancer); @@ -2327,7 +2327,7 @@ struct COLR { // COLRv1 glyph - VarStoreInstancer instancer (&(this+varStore), + ItemVarStoreInstancer instancer (&(this+varStore), &(this+varIdxMap), hb_array (font->coords, font->num_coords)); @@ -2413,7 +2413,7 @@ struct COLR Offset32To<LayerList> layerList; Offset32To<ClipList> clipList; // Offset to ClipList table (may be NULL) Offset32To<DeltaSetIndexMap> varIdxMap; // Offset to DeltaSetIndexMap table (may be NULL) - Offset32To<VariationStore> varStore; + Offset32To<ItemVariationStore> varStore; public: DEFINE_SIZE_MIN (14); }; diff --git a/gfx/harfbuzz/src/OT/Layout/GDEF/GDEF.hh b/gfx/harfbuzz/src/OT/Layout/GDEF/GDEF.hh index 14a9b5e5cd..475e6d74d1 100644 --- a/gfx/harfbuzz/src/OT/Layout/GDEF/GDEF.hh +++ b/gfx/harfbuzz/src/OT/Layout/GDEF/GDEF.hh @@ -189,7 +189,7 @@ struct CaretValueFormat3 friend struct CaretValue; hb_position_t get_caret_value (hb_font_t *font, hb_direction_t direction, - const VariationStore &var_store) const + const ItemVariationStore &var_store) const { return HB_DIRECTION_IS_HORIZONTAL (direction) ? font->em_scale_x (coordinate) + (this+deviceTable).get_x_delta (font, var_store) : @@ -251,7 +251,7 @@ struct CaretValue hb_position_t get_caret_value (hb_font_t *font, hb_direction_t direction, hb_codepoint_t glyph_id, - const VariationStore &var_store) const + const ItemVariationStore &var_store) const { switch (u.format) { case 1: return u.format1.get_caret_value (font, direction); @@ -316,7 +316,7 @@ struct LigGlyph unsigned get_lig_carets (hb_font_t *font, hb_direction_t direction, hb_codepoint_t glyph_id, - const VariationStore &var_store, + const ItemVariationStore &var_store, unsigned start_offset, unsigned *caret_count /* IN/OUT */, hb_position_t *caret_array /* OUT */) const @@ -372,7 +372,7 @@ struct LigCaretList unsigned int get_lig_carets (hb_font_t *font, hb_direction_t direction, hb_codepoint_t glyph_id, - const VariationStore &var_store, + const ItemVariationStore &var_store, unsigned int start_offset, unsigned int *caret_count /* IN/OUT */, hb_position_t *caret_array /* OUT */) const @@ -609,7 +609,7 @@ struct GDEFVersion1_2 * definitions--from beginning of GDEF * header (may be NULL). Introduced * in version 0x00010002. */ - Offset32To<VariationStore> + Offset32To<ItemVariationStore> varStore; /* Offset to the table of Item Variation * Store--from beginning of GDEF * header (may be NULL). 
Introduced @@ -884,14 +884,14 @@ struct GDEF default: return false; } } - const VariationStore &get_var_store () const + const ItemVariationStore &get_var_store () const { switch (u.version.major) { - case 1: return u.version.to_int () >= 0x00010003u ? this+u.version1.varStore : Null(VariationStore); + case 1: return u.version.to_int () >= 0x00010003u ? this+u.version1.varStore : Null(ItemVariationStore); #ifndef HB_NO_BEYOND_64K case 2: return this+u.version2.varStore; #endif - default: return Null(VariationStore); + default: return Null(ItemVariationStore); } } @@ -1011,7 +1011,7 @@ struct GDEF hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map /* OUT */) const { if (!has_var_store ()) return; - const VariationStore &var_store = get_var_store (); + const ItemVariationStore &var_store = get_var_store (); float *store_cache = var_store.create_cache (); unsigned new_major = 0, new_minor = 0; diff --git a/gfx/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh b/gfx/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh index dd02da887d..9c805b39a1 100644 --- a/gfx/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh +++ b/gfx/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh @@ -324,17 +324,8 @@ struct PairPosFormat2_4 : ValueBase } } - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto it = - + hb_iter (this+coverage) - | hb_filter (glyphset) - | hb_map_retains_sorting (glyph_map) - ; - - out->coverage.serialize_serialize (c->serializer, it); - return_trace (out->class1Count && out->class2Count && bool (it)); + bool ret = out->coverage.serialize_subset(c, coverage, this); + return_trace (out->class1Count && out->class2Count && ret); } diff --git a/gfx/harfbuzz/src/OT/Layout/GPOS/ValueFormat.hh b/gfx/harfbuzz/src/OT/Layout/GPOS/ValueFormat.hh index 17f57db1f5..9442cc1cc5 100644 --- a/gfx/harfbuzz/src/OT/Layout/GPOS/ValueFormat.hh +++ b/gfx/harfbuzz/src/OT/Layout/GPOS/ValueFormat.hh @@ -116,7 +116,7 @@ struct ValueFormat : HBUINT16 if (!use_x_device && !use_y_device) return ret; - const VariationStore &store = c->var_store; + const ItemVariationStore &store = c->var_store; auto *cache = c->var_store_cache; /* pixel -> fractional pixel */ diff --git a/gfx/harfbuzz/src/OT/glyf/CompositeGlyph.hh b/gfx/harfbuzz/src/OT/glyf/CompositeGlyph.hh index 60858a5a58..5c0ecd5133 100644 --- a/gfx/harfbuzz/src/OT/glyf/CompositeGlyph.hh +++ b/gfx/harfbuzz/src/OT/glyf/CompositeGlyph.hh @@ -240,7 +240,8 @@ struct CompositeGlyphRecord } if (is_anchored ()) tx = ty = 0; - trans.init ((float) tx, (float) ty); + /* set is_end_point flag to true, used by IUP delta optimization */ + trans.init ((float) tx, (float) ty, true); { const F2DOT14 *points = (const F2DOT14 *) p; diff --git a/gfx/harfbuzz/src/OT/glyf/Glyph.hh b/gfx/harfbuzz/src/OT/glyf/Glyph.hh index 5ea611948f..69a0b625c7 100644 --- a/gfx/harfbuzz/src/OT/glyf/Glyph.hh +++ b/gfx/harfbuzz/src/OT/glyf/Glyph.hh @@ -103,6 +103,9 @@ struct Glyph } } + bool is_composite () const + { return type == COMPOSITE; } + bool get_all_points_without_var (const hb_face_t *face, contour_point_vector_t &points /* OUT */) const { diff --git a/gfx/harfbuzz/src/OT/glyf/glyf-helpers.hh b/gfx/harfbuzz/src/OT/glyf/glyf-helpers.hh index d0a5a132f0..f157bf0020 100644 --- a/gfx/harfbuzz/src/OT/glyf/glyf-helpers.hh +++ b/gfx/harfbuzz/src/OT/glyf/glyf-helpers.hh @@ -38,7 +38,7 @@ _write_loca (IteratorIn&& it, unsigned padded_size = *it++; offset += padded_size; - DEBUG_MSG (SUBSET, nullptr, "loca entry gid %u offset %u 
padded-size %u", gid, offset, padded_size); + DEBUG_MSG (SUBSET, nullptr, "loca entry gid %" PRIu32 " offset %u padded-size %u", gid, offset, padded_size); value = offset >> right_shift; *dest++ = value; diff --git a/gfx/harfbuzz/src/gen-def.py b/gfx/harfbuzz/src/gen-def.py index c34976a067..f199d7f314 100755 --- a/gfx/harfbuzz/src/gen-def.py +++ b/gfx/harfbuzz/src/gen-def.py @@ -23,6 +23,7 @@ if '--experimental-api' not in sys.argv: hb_subset_repack_or_fail hb_subset_input_override_name_table hb_subset_input_set_axis_range +hb_subset_input_get_axis_range """.splitlines () symbols = [x for x in symbols if x not in experimental_symbols] symbols = "\n".join (symbols) diff --git a/gfx/harfbuzz/src/gen-tag-table.py b/gfx/harfbuzz/src/gen-tag-table.py index 7e15c08c56..86913c3efa 100755 --- a/gfx/harfbuzz/src/gen-tag-table.py +++ b/gfx/harfbuzz/src/gen-tag-table.py @@ -584,7 +584,7 @@ class BCP47Parser (object): self.grandfathered.add (subtag.lower ()) elif line.startswith ('Description: '): description = line.split (' ', 1)[1].replace (' (individual language)', '') - description = re.sub (' (\(family\)|\((individual |macro)language\)|languages)$', '', + description = re.sub (r' (\(family\)|\((individual |macro)language\)|languages)$', '', description) if subtag in self.names: self.names[subtag] += '\n' + description diff --git a/gfx/harfbuzz/src/graph/classdef-graph.hh b/gfx/harfbuzz/src/graph/classdef-graph.hh index 9cf845a82d..da6378820b 100644 --- a/gfx/harfbuzz/src/graph/classdef-graph.hh +++ b/gfx/harfbuzz/src/graph/classdef-graph.hh @@ -134,20 +134,23 @@ struct ClassDef : public OT::ClassDef struct class_def_size_estimator_t { + // TODO(garretrieger): update to support beyond64k coverage/classdef tables. + constexpr static unsigned class_def_format1_base_size = 6; + constexpr static unsigned class_def_format2_base_size = 4; + constexpr static unsigned coverage_base_size = 4; + constexpr static unsigned bytes_per_range = 6; + constexpr static unsigned bytes_per_glyph = 2; + template<typename It> class_def_size_estimator_t (It glyph_and_class) - : gids_consecutive (true), num_ranges_per_class (), glyphs_per_class () + : num_ranges_per_class (), glyphs_per_class () { - unsigned last_gid = (unsigned) -1; + reset(); for (auto p : + glyph_and_class) { unsigned gid = p.first; unsigned klass = p.second; - if (last_gid != (unsigned) -1 && gid != last_gid + 1) - gids_consecutive = false; - last_gid = gid; - hb_set_t* glyphs; if (glyphs_per_class.has (klass, &glyphs) && glyphs) { glyphs->add (gid); @@ -177,28 +180,54 @@ struct class_def_size_estimator_t } } - // Incremental increase in the Coverage and ClassDef table size - // (worst case) if all glyphs associated with 'klass' were added. - unsigned incremental_coverage_size (unsigned klass) const + void reset() { + class_def_1_size = class_def_format1_base_size; + class_def_2_size = class_def_format2_base_size; + included_glyphs.clear(); + included_classes.clear(); + } + + // Compute the size of coverage for all glyphs added via 'add_class_def_size'. 
+ unsigned coverage_size () const { - // Coverage takes 2 bytes per glyph worst case, - return 2 * glyphs_per_class.get (klass).get_population (); + unsigned format1_size = coverage_base_size + bytes_per_glyph * included_glyphs.get_population(); + unsigned format2_size = coverage_base_size + bytes_per_range * num_glyph_ranges(); + return hb_min(format1_size, format2_size); } - // Incremental increase in the Coverage and ClassDef table size - // (worst case) if all glyphs associated with 'klass' were added. - unsigned incremental_class_def_size (unsigned klass) const + // Compute the new size of the ClassDef table if all glyphs associated with 'klass' were added. + unsigned add_class_def_size (unsigned klass) { - // ClassDef takes 6 bytes per range - unsigned class_def_2_size = 6 * num_ranges_per_class.get (klass); - if (gids_consecutive) - { - // ClassDef1 takes 2 bytes per glyph, but only can be used - // when gids are consecutive. - return hb_min (2 * glyphs_per_class.get (klass).get_population (), class_def_2_size); + if (!included_classes.has(klass)) { + hb_set_t* glyphs = nullptr; + if (glyphs_per_class.has(klass, &glyphs)) { + included_glyphs.union_(*glyphs); + } + + class_def_1_size = class_def_format1_base_size; + if (!included_glyphs.is_empty()) { + unsigned min_glyph = included_glyphs.get_min(); + unsigned max_glyph = included_glyphs.get_max(); + class_def_1_size += bytes_per_glyph * (max_glyph - min_glyph + 1); + } + + class_def_2_size += bytes_per_range * num_ranges_per_class.get (klass); + + included_classes.add(klass); } - return class_def_2_size; + return hb_min (class_def_1_size, class_def_2_size); + } + + unsigned num_glyph_ranges() const { + hb_codepoint_t start = HB_SET_VALUE_INVALID; + hb_codepoint_t end = HB_SET_VALUE_INVALID; + + unsigned count = 0; + while (included_glyphs.next_range (&start, &end)) { + count++; + } + return count; } bool in_error () @@ -214,9 +243,12 @@ struct class_def_size_estimator_t } private: - bool gids_consecutive; hb_hashmap_t<unsigned, unsigned> num_ranges_per_class; hb_hashmap_t<unsigned, hb_set_t> glyphs_per_class; + hb_set_t included_classes; + hb_set_t included_glyphs; + unsigned class_def_1_size; + unsigned class_def_2_size; }; diff --git a/gfx/harfbuzz/src/graph/graph.hh b/gfx/harfbuzz/src/graph/graph.hh index 26ad00bdd9..2a9d8346c0 100644 --- a/gfx/harfbuzz/src/graph/graph.hh +++ b/gfx/harfbuzz/src/graph/graph.hh @@ -195,6 +195,15 @@ struct graph_t return incoming_edges_; } + unsigned incoming_edges_from_parent (unsigned parent_index) const { + if (single_parent != (unsigned) -1) { + return single_parent == parent_index ? 1 : 0; + } + + unsigned* count; + return parents.has(parent_index, &count) ? *count : 0; + } + void reset_parents () { incoming_edges_ = 0; @@ -334,6 +343,16 @@ struct graph_t return true; } + bool give_max_priority () + { + bool result = false; + while (!has_max_priority()) { + result = true; + priority++; + } + return result; + } + bool has_max_priority () const { return priority >= 3; } @@ -1023,6 +1042,11 @@ struct graph_t * Creates a copy of child and re-assigns the link from * parent to the clone. The copy is a shallow copy, objects * linked from child are not duplicated. + * + * Returns the index of the newly created duplicate. + * + * If the child_idx only has incoming edges from parent_idx, this + * will do nothing and return the original child_idx. 
*/ unsigned duplicate_if_shared (unsigned parent_idx, unsigned child_idx) { @@ -1036,18 +1060,20 @@ struct graph_t * Creates a copy of child and re-assigns the link from * parent to the clone. The copy is a shallow copy, objects * linked from child are not duplicated. + * + * Returns the index of the newly created duplicate. + * + * If the child_idx only has incoming edges from parent_idx, + * duplication isn't possible and this will return -1. */ unsigned duplicate (unsigned parent_idx, unsigned child_idx) { update_parents (); - unsigned links_to_child = 0; - for (const auto& l : vertices_[parent_idx].obj.all_links ()) - { - if (l.objidx == child_idx) links_to_child++; - } + const auto& child = vertices_[child_idx]; + unsigned links_to_child = child.incoming_edges_from_parent(parent_idx); - if (vertices_[child_idx].incoming_edges () <= links_to_child) + if (child.incoming_edges () <= links_to_child) { // Can't duplicate this node, doing so would orphan the original one as all remaining links // to child are from parent. @@ -1060,7 +1086,7 @@ struct graph_t parent_idx, child_idx); unsigned clone_idx = duplicate (child_idx); - if (clone_idx == (unsigned) -1) return false; + if (clone_idx == (unsigned) -1) return -1; // duplicate shifts the root node idx, so if parent_idx was root update it. if (parent_idx == clone_idx) parent_idx++; @@ -1076,6 +1102,62 @@ struct graph_t return clone_idx; } + /* + * Creates a copy of child and re-assigns the links from + * parents to the clone. The copy is a shallow copy, objects + * linked from child are not duplicated. + * + * Returns the index of the newly created duplicate. + * + * If the child_idx only has incoming edges from parents, + * duplication isn't possible or duplication fails and this will + * return -1. + */ + unsigned duplicate (const hb_set_t* parents, unsigned child_idx) + { + if (parents->is_empty()) { + return -1; + } + + update_parents (); + + const auto& child = vertices_[child_idx]; + unsigned links_to_child = 0; + unsigned last_parent = parents->get_max(); + unsigned first_parent = parents->get_min(); + for (unsigned parent_idx : *parents) { + links_to_child += child.incoming_edges_from_parent(parent_idx); + } + + if (child.incoming_edges () <= links_to_child) + { + // Can't duplicate this node, doing so would orphan the original one as all remaining links + // to child are from parent. + DEBUG_MSG (SUBSET_REPACK, nullptr, " Not duplicating %u, ..., %u => %u", first_parent, last_parent, child_idx); + return -1; + } + + DEBUG_MSG (SUBSET_REPACK, nullptr, " Duplicating %u, ..., %u => %u", first_parent, last_parent, child_idx); + + unsigned clone_idx = duplicate (child_idx); + if (clone_idx == (unsigned) -1) return false; + + for (unsigned parent_idx : *parents) { + // duplicate shifts the root node idx, so if parent_idx was root update it. + if (parent_idx == clone_idx) parent_idx++; + auto& parent = vertices_[parent_idx]; + for (auto& l : parent.obj.all_links_writer ()) + { + if (l.objidx != child_idx) + continue; + + reassign_link (l, parent_idx, clone_idx); + } + } + + return clone_idx; + } + /* * Adds a new node to the graph, not connected to anything. 
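The multi-parent duplicate() overload above applies the same guard as the single-parent path: the child is only cloned when at least one incoming edge from outside the given parent set would keep the original node alive; otherwise every remaining link would move to the clone and the original would be orphaned. Below is a minimal, self-contained sketch of that orphan check, using hypothetical stand-in types rather than the real graph_t / hb_set_t API:

#include <set>
#include <unordered_map>

// Hypothetical bookkeeping: child index -> (parent index -> number of links from that parent).
using incoming_edges_t = std::unordered_map<unsigned, std::unordered_map<unsigned, unsigned>>;

// True when duplicating 'child_idx' for this parent set is safe, i.e. some
// incoming edge from a parent outside the set still points at the original.
static bool can_duplicate (const incoming_edges_t &incoming,
                           const std::set<unsigned> &parents,
                           unsigned child_idx)
{
  auto it = incoming.find (child_idx);
  if (it == incoming.end ()) return false;   // no incoming edges at all: nothing to re-point

  unsigned total = 0, from_parents = 0;
  for (const auto &edge : it->second)
  {
    total += edge.second;
    if (parents.count (edge.first)) from_parents += edge.second;
  }
  // Mirrors the bail-out in the patch: incoming_edges () <= links_to_child means "don't duplicate".
  return total > from_parents;
}

Keeping per-parent link counts, as the new incoming_edges_from_parent() helper does, makes this check a single map lookup per parent instead of a walk over every link of every parent.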
diff --git a/gfx/harfbuzz/src/graph/pairpos-graph.hh b/gfx/harfbuzz/src/graph/pairpos-graph.hh index f7f74b18c9..fd46861de4 100644 --- a/gfx/harfbuzz/src/graph/pairpos-graph.hh +++ b/gfx/harfbuzz/src/graph/pairpos-graph.hh @@ -247,8 +247,8 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType for (unsigned i = 0; i < class1_count; i++) { unsigned accumulated_delta = class1_record_size; - coverage_size += estimator.incremental_coverage_size (i); - class_def_1_size += estimator.incremental_class_def_size (i); + class_def_1_size = estimator.add_class_def_size (i); + coverage_size = estimator.coverage_size (); max_coverage_size = hb_max (max_coverage_size, coverage_size); max_class_def_1_size = hb_max (max_class_def_1_size, class_def_1_size); @@ -280,8 +280,10 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType split_points.push (i); // split does not include i, so add the size for i when we reset the size counters. accumulated = base_size + accumulated_delta; - coverage_size = 4 + estimator.incremental_coverage_size (i); - class_def_1_size = 4 + estimator.incremental_class_def_size (i); + + estimator.reset(); + class_def_1_size = estimator.add_class_def_size(i); + coverage_size = estimator.coverage_size(); visited.clear (); // node sharing isn't allowed between splits. } } diff --git a/gfx/harfbuzz/src/graph/test-classdef-graph.cc b/gfx/harfbuzz/src/graph/test-classdef-graph.cc index 266be5e2d4..2da9348111 100644 --- a/gfx/harfbuzz/src/graph/test-classdef-graph.cc +++ b/gfx/harfbuzz/src/graph/test-classdef-graph.cc @@ -26,27 +26,119 @@ #include "gsubgpos-context.hh" #include "classdef-graph.hh" +#include "hb-iter.hh" +#include "hb-serialize.hh" typedef hb_codepoint_pair_t gid_and_class_t; typedef hb_vector_t<gid_and_class_t> gid_and_class_list_t; +template<typename It> +static unsigned actual_class_def_size(It glyph_and_class) { + char buffer[100]; + hb_serialize_context_t serializer(buffer, 100); + OT::ClassDef_serialize (&serializer, glyph_and_class); + serializer.end_serialize (); + assert(!serializer.in_error()); -static bool incremental_size_is (const gid_and_class_list_t& list, unsigned klass, - unsigned cov_expected, unsigned class_def_expected) + hb_blob_t* blob = serializer.copy_blob(); + unsigned size = hb_blob_get_length(blob); + hb_blob_destroy(blob); + return size; +} + +static unsigned actual_class_def_size(gid_and_class_list_t consecutive_map, hb_vector_t<unsigned> classes) { + auto filtered_it = + + consecutive_map.as_sorted_array().iter() + | hb_filter([&] (unsigned c) { + for (unsigned klass : classes) { + if (c == klass) { + return true; + } + } + return false; + }, hb_second); + return actual_class_def_size(+ filtered_it); +} + +template<typename It> +static unsigned actual_coverage_size(It glyphs) { + char buffer[100]; + hb_serialize_context_t serializer(buffer, 100); + OT::Layout::Common::Coverage_serialize (&serializer, glyphs); + serializer.end_serialize (); + assert(!serializer.in_error()); + + hb_blob_t* blob = serializer.copy_blob(); + unsigned size = hb_blob_get_length(blob); + hb_blob_destroy(blob); + return size; +} + +static unsigned actual_coverage_size(gid_and_class_list_t consecutive_map, hb_vector_t<unsigned> classes) { + auto filtered_it = + + consecutive_map.as_sorted_array().iter() + | hb_filter([&] (unsigned c) { + for (unsigned klass : classes) { + if (c == klass) { + return true; + } + } + return false; + }, hb_second); + return actual_coverage_size(+ filtered_it | 
hb_map_retains_sorting(hb_first)); +} + +static bool check_coverage_size(graph::class_def_size_estimator_t& estimator, + const gid_and_class_list_t& map, + hb_vector_t<unsigned> klasses) +{ + unsigned result = estimator.coverage_size(); + unsigned expected = actual_coverage_size(map, klasses); + if (result != expected) { + printf ("FAIL: estimated coverage expected size %u but was %u\n", expected, result); + return false; + } + return true; +} + +static bool check_add_class_def_size(graph::class_def_size_estimator_t& estimator, + const gid_and_class_list_t& map, + unsigned klass, hb_vector_t<unsigned> klasses) +{ + unsigned result = estimator.add_class_def_size(klass); + unsigned expected = actual_class_def_size(map, klasses); + if (result != expected) { + printf ("FAIL: estimated class def expected size %u but was %u\n", expected, result); + return false; + } + + return check_coverage_size(estimator, map, klasses); +} + +static bool check_add_class_def_size (const gid_and_class_list_t& list, unsigned klass) { graph::class_def_size_estimator_t estimator (list.iter ()); - unsigned result = estimator.incremental_coverage_size (klass); - if (result != cov_expected) + unsigned result = estimator.add_class_def_size (klass); + auto filtered_it = + + list.as_sorted_array().iter() + | hb_filter([&] (unsigned c) { + return c == klass; + }, hb_second); + + unsigned expected = actual_class_def_size(filtered_it); + if (result != expected) { - printf ("FAIL: coverage expected size %u but was %u\n", cov_expected, result); + printf ("FAIL: class def expected size %u but was %u\n", expected, result); return false; } - result = estimator.incremental_class_def_size (klass); - if (result != class_def_expected) + auto cov_it = + filtered_it | hb_map_retains_sorting(hb_first); + result = estimator.coverage_size (); + expected = actual_coverage_size(cov_it); + if (result != expected) { - printf ("FAIL: class def expected size %u but was %u\n", class_def_expected, result); + printf ("FAIL: coverage expected size %u but was %u\n", expected, result); return false; } @@ -57,43 +149,45 @@ static void test_class_and_coverage_size_estimates () { gid_and_class_list_t empty = { }; - assert (incremental_size_is (empty, 0, 0, 0)); - assert (incremental_size_is (empty, 1, 0, 0)); + assert (check_add_class_def_size (empty, 0)); + assert (check_add_class_def_size (empty, 1)); gid_and_class_list_t class_zero = { {5, 0}, }; - assert (incremental_size_is (class_zero, 0, 2, 0)); + assert (check_add_class_def_size (class_zero, 0)); gid_and_class_list_t consecutive = { {4, 0}, {5, 0}, + {6, 1}, {7, 1}, + {8, 2}, {9, 2}, {10, 2}, {11, 2}, }; - assert (incremental_size_is (consecutive, 0, 4, 0)); - assert (incremental_size_is (consecutive, 1, 4, 4)); - assert (incremental_size_is (consecutive, 2, 8, 6)); + assert (check_add_class_def_size (consecutive, 0)); + assert (check_add_class_def_size (consecutive, 1)); + assert (check_add_class_def_size (consecutive, 2)); gid_and_class_list_t non_consecutive = { {4, 0}, - {5, 0}, + {6, 0}, - {6, 1}, - {7, 1}, + {8, 1}, + {10, 1}, {9, 2}, {10, 2}, {11, 2}, - {12, 2}, + {13, 2}, }; - assert (incremental_size_is (non_consecutive, 0, 4, 0)); - assert (incremental_size_is (non_consecutive, 1, 4, 6)); - assert (incremental_size_is (non_consecutive, 2, 8, 6)); + assert (check_add_class_def_size (non_consecutive, 0)); + assert (check_add_class_def_size (non_consecutive, 1)); + assert (check_add_class_def_size (non_consecutive, 2)); gid_and_class_list_t multiple_ranges = { {4, 0}, @@ -108,12 +202,95 
@@ static void test_class_and_coverage_size_estimates () {12, 1}, {13, 1}, }; - assert (incremental_size_is (multiple_ranges, 0, 4, 0)); - assert (incremental_size_is (multiple_ranges, 1, 2 * 6, 3 * 6)); + assert (check_add_class_def_size (multiple_ranges, 0)); + assert (check_add_class_def_size (multiple_ranges, 1)); +} + +static void test_running_class_and_coverage_size_estimates () { + // #### With consecutive gids: switches formats ### + gid_and_class_list_t consecutive_map = { + // range 1-4 (f1: 8 bytes), (f2: 6 bytes) + {1, 1}, + {2, 1}, + {3, 1}, + {4, 1}, + + // (f1: 2 bytes), (f2: 6 bytes) + {5, 2}, + + // (f1: 14 bytes), (f2: 6 bytes) + {6, 3}, + {7, 3}, + {8, 3}, + {9, 3}, + {10, 3}, + {11, 3}, + {12, 3}, + }; + + graph::class_def_size_estimator_t estimator1(consecutive_map.iter()); + assert(check_add_class_def_size(estimator1, consecutive_map, 1, {1})); + assert(check_add_class_def_size(estimator1, consecutive_map, 2, {1, 2})); + assert(check_add_class_def_size(estimator1, consecutive_map, 2, {1, 2})); // check that adding the same class again works + assert(check_add_class_def_size(estimator1, consecutive_map, 3, {1, 2, 3})); + + estimator1.reset(); + assert(check_add_class_def_size(estimator1, consecutive_map, 2, {2})); + assert(check_add_class_def_size(estimator1, consecutive_map, 3, {2, 3})); + + // #### With non-consecutive gids: always uses format 2 ### + gid_and_class_list_t non_consecutive_map = { + // range 1-4 (f1: 8 bytes), (f2: 6 bytes) + {1, 1}, + {2, 1}, + {3, 1}, + {4, 1}, + + // (f1: 2 bytes), (f2: 12 bytes) + {6, 2}, + {8, 2}, + + // (f1: 14 bytes), (f2: 6 bytes) + {9, 3}, + {10, 3}, + {11, 3}, + {12, 3}, + {13, 3}, + {14, 3}, + {15, 3}, + }; + + graph::class_def_size_estimator_t estimator2(non_consecutive_map.iter()); + assert(check_add_class_def_size(estimator2, non_consecutive_map, 1, {1})); + assert(check_add_class_def_size(estimator2, non_consecutive_map, 2, {1, 2})); + assert(check_add_class_def_size(estimator2, non_consecutive_map, 3, {1, 2, 3})); + + estimator2.reset(); + assert(check_add_class_def_size(estimator2, non_consecutive_map, 2, {2})); + assert(check_add_class_def_size(estimator2, non_consecutive_map, 3, {2, 3})); +} + +static void test_running_class_size_estimates_with_locally_consecutive_glyphs () { + gid_and_class_list_t map = { + {1, 1}, + {6, 2}, + {7, 3}, + }; + + graph::class_def_size_estimator_t estimator(map.iter()); + assert(check_add_class_def_size(estimator, map, 1, {1})); + assert(check_add_class_def_size(estimator, map, 2, {1, 2})); + assert(check_add_class_def_size(estimator, map, 3, {1, 2, 3})); + + estimator.reset(); + assert(check_add_class_def_size(estimator, map, 2, {2})); + assert(check_add_class_def_size(estimator, map, 3, {2, 3})); } int main (int argc, char **argv) { test_class_and_coverage_size_estimates (); + test_running_class_and_coverage_size_estimates (); + test_running_class_size_estimates_with_locally_consecutive_glyphs (); } diff --git a/gfx/harfbuzz/src/harfbuzz-cairo.pc.in b/gfx/harfbuzz/src/harfbuzz-cairo.pc.in index df97ff1512..06ba8047c4 100644 --- a/gfx/harfbuzz/src/harfbuzz-cairo.pc.in +++ b/gfx/harfbuzz/src/harfbuzz-cairo.pc.in @@ -8,5 +8,6 @@ Description: HarfBuzz cairo integration Version: %VERSION% Requires: harfbuzz = %VERSION% +Requires.private: cairo Libs: -L${libdir} -lharfbuzz-cairo Cflags: -I${includedir}/harfbuzz diff --git a/gfx/harfbuzz/src/harfbuzz-subset.cc b/gfx/harfbuzz/src/harfbuzz-subset.cc index c0e23b3eb8..f80c004cbb 100644 --- a/gfx/harfbuzz/src/harfbuzz-subset.cc +++ 
b/gfx/harfbuzz/src/harfbuzz-subset.cc @@ -54,6 +54,7 @@ #include "hb-subset-cff1.cc" #include "hb-subset-cff2.cc" #include "hb-subset-input.cc" +#include "hb-subset-instancer-iup.cc" #include "hb-subset-instancer-solver.cc" #include "hb-subset-plan.cc" #include "hb-subset-repacker.cc" diff --git a/gfx/harfbuzz/src/hb-aat-layout-morx-table.hh b/gfx/harfbuzz/src/hb-aat-layout-morx-table.hh index 06c9334b37..8436551324 100644 --- a/gfx/harfbuzz/src/hb-aat-layout-morx-table.hh +++ b/gfx/harfbuzz/src/hb-aat-layout-morx-table.hh @@ -552,6 +552,7 @@ struct LigatureSubtable { DEBUG_MSG (APPLY, nullptr, "Skipping ligature component"); if (unlikely (!buffer->move_to (match_positions[--match_length % ARRAY_LENGTH (match_positions)]))) return; + buffer->cur().unicode_props() |= UPROPS_MASK_IGNORABLE; if (unlikely (!buffer->replace_glyph (DELETED_GLYPH))) return; } diff --git a/gfx/harfbuzz/src/hb-algs.hh b/gfx/harfbuzz/src/hb-algs.hh index ea97057165..efa6074a42 100644 --- a/gfx/harfbuzz/src/hb-algs.hh +++ b/gfx/harfbuzz/src/hb-algs.hh @@ -671,7 +671,7 @@ struct hb_pair_t return 0; } - friend void swap (hb_pair_t& a, hb_pair_t& b) + friend void swap (hb_pair_t& a, hb_pair_t& b) noexcept { hb_swap (a.first, b.first); hb_swap (a.second, b.second); @@ -1053,6 +1053,18 @@ _hb_cmp_method (const void *pkey, const void *pval, Ts... ds) return val.cmp (key, ds...); } +template <typename K, typename V> +static int +_hb_cmp_operator (const void *pkey, const void *pval) +{ + const K& key = * (const K*) pkey; + const V& val = * (const V*) pval; + + if (key < val) return -1; + if (key > val) return 1; + return 0; +} + template <typename V, typename K, typename ...Ts> static inline bool hb_bsearch_impl (unsigned *pos, /* Out */ diff --git a/gfx/harfbuzz/src/hb-bit-set-invertible.hh b/gfx/harfbuzz/src/hb-bit-set-invertible.hh index 2626251807..d5d1326d9f 100644 --- a/gfx/harfbuzz/src/hb-bit-set-invertible.hh +++ b/gfx/harfbuzz/src/hb-bit-set-invertible.hh @@ -39,10 +39,10 @@ struct hb_bit_set_invertible_t hb_bit_set_invertible_t () = default; hb_bit_set_invertible_t (const hb_bit_set_invertible_t& o) = default; - hb_bit_set_invertible_t (hb_bit_set_invertible_t&& other) : hb_bit_set_invertible_t () { hb_swap (*this, other); } + hb_bit_set_invertible_t (hb_bit_set_invertible_t&& other) noexcept : hb_bit_set_invertible_t () { hb_swap (*this, other); } hb_bit_set_invertible_t& operator= (const hb_bit_set_invertible_t& o) = default; - hb_bit_set_invertible_t& operator= (hb_bit_set_invertible_t&& other) { hb_swap (*this, other); return *this; } - friend void swap (hb_bit_set_invertible_t &a, hb_bit_set_invertible_t &b) + hb_bit_set_invertible_t& operator= (hb_bit_set_invertible_t&& other) noexcept { hb_swap (*this, other); return *this; } + friend void swap (hb_bit_set_invertible_t &a, hb_bit_set_invertible_t &b) noexcept { if (likely (!a.s.successful || !b.s.successful)) return; diff --git a/gfx/harfbuzz/src/hb-bit-set.hh b/gfx/harfbuzz/src/hb-bit-set.hh index 1dbcce5cbd..5f4c6f0afe 100644 --- a/gfx/harfbuzz/src/hb-bit-set.hh +++ b/gfx/harfbuzz/src/hb-bit-set.hh @@ -38,10 +38,10 @@ struct hb_bit_set_t ~hb_bit_set_t () = default; hb_bit_set_t (const hb_bit_set_t& other) : hb_bit_set_t () { set (other, true); } - hb_bit_set_t ( hb_bit_set_t&& other) : hb_bit_set_t () { hb_swap (*this, other); } + hb_bit_set_t ( hb_bit_set_t&& other) noexcept : hb_bit_set_t () { hb_swap (*this, other); } hb_bit_set_t& operator= (const hb_bit_set_t& other) { set (other); return *this; } - hb_bit_set_t& operator= (hb_bit_set_t&& other) { 
hb_swap (*this, other); return *this; } - friend void swap (hb_bit_set_t &a, hb_bit_set_t &b) + hb_bit_set_t& operator= (hb_bit_set_t&& other) noexcept { hb_swap (*this, other); return *this; } + friend void swap (hb_bit_set_t &a, hb_bit_set_t &b) noexcept { if (likely (!a.successful || !b.successful)) return; diff --git a/gfx/harfbuzz/src/hb-blob.cc b/gfx/harfbuzz/src/hb-blob.cc index 265effba03..873d9b257a 100644 --- a/gfx/harfbuzz/src/hb-blob.cc +++ b/gfx/harfbuzz/src/hb-blob.cc @@ -598,6 +598,11 @@ _open_resource_fork (const char *file_name, hb_mapped_file_t *file) * Creates a new blob containing the data from the * specified binary font file. * + * The filename is passed directly to the system on all platforms, + * except on Windows, where the filename is interpreted as UTF-8. + * Only if the filename is not valid UTF-8, it will be interpreted + * according to the system codepage. + * * Returns: An #hb_blob_t pointer with the content of the file, * or hb_blob_get_empty() if failed. * @@ -617,6 +622,11 @@ hb_blob_create_from_file (const char *file_name) * Creates a new blob containing the data from the * specified binary font file. * + * The filename is passed directly to the system on all platforms, + * except on Windows, where the filename is interpreted as UTF-8. + * Only if the filename is not valid UTF-8, it will be interpreted + * according to the system codepage. + * * Returns: An #hb_blob_t pointer with the content of the file, * or `NULL` if failed. * @@ -672,10 +682,19 @@ fail_without_close: if (unlikely (!file)) return nullptr; HANDLE fd; + int conversion; unsigned int size = strlen (file_name) + 1; wchar_t * wchar_file_name = (wchar_t *) hb_malloc (sizeof (wchar_t) * size); if (unlikely (!wchar_file_name)) goto fail_without_close; - mbstowcs (wchar_file_name, file_name, size); + + /* Assume file name is given in UTF-8 encoding */ + conversion = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, file_name, -1, wchar_file_name, size); + if (conversion <= 0) + { + /* Conversion failed due to invalid UTF-8 characters, + Repeat conversion based on system code page */ + mbstowcs(wchar_file_name, file_name, size); + } #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) { CREATEFILE2_EXTENDED_PARAMETERS ceparams = { 0 }; diff --git a/gfx/harfbuzz/src/hb-buffer-verify.cc b/gfx/harfbuzz/src/hb-buffer-verify.cc index 15a53919de..671d6eda8c 100644 --- a/gfx/harfbuzz/src/hb-buffer-verify.cc +++ b/gfx/harfbuzz/src/hb-buffer-verify.cc @@ -149,7 +149,7 @@ buffer_verify_unsafe_to_break (hb_buffer_t *buffer, } assert (text_start < text_end); - if (0) + if (false) printf("start %u end %u text start %u end %u\n", start, end, text_start, text_end); hb_buffer_clear_contents (fragment); @@ -288,7 +288,7 @@ buffer_verify_unsafe_to_concat (hb_buffer_t *buffer, } assert (text_start < text_end); - if (0) + if (false) printf("start %u end %u text start %u end %u\n", start, end, text_start, text_end); #if 0 diff --git a/gfx/harfbuzz/src/hb-cff-interp-dict-common.hh b/gfx/harfbuzz/src/hb-cff-interp-dict-common.hh index 53226b227e..a08b10b5ff 100644 --- a/gfx/harfbuzz/src/hb-cff-interp-dict-common.hh +++ b/gfx/harfbuzz/src/hb-cff-interp-dict-common.hh @@ -54,8 +54,8 @@ struct top_dict_values_t : dict_values_t<OPSTR> } void fini () { dict_values_t<OPSTR>::fini (); } - unsigned int charStringsOffset; - unsigned int FDArrayOffset; + int charStringsOffset; + int FDArrayOffset; }; struct dict_opset_t : opset_t<number_t> @@ -157,11 +157,11 @@ struct 
top_dict_opset_t : dict_opset_t { switch (op) { case OpCode_CharStrings: - dictval.charStringsOffset = env.argStack.pop_uint (); + dictval.charStringsOffset = env.argStack.pop_int (); env.clear_args (); break; case OpCode_FDArray: - dictval.FDArrayOffset = env.argStack.pop_uint (); + dictval.FDArrayOffset = env.argStack.pop_int (); env.clear_args (); break; case OpCode_FontMatrix: diff --git a/gfx/harfbuzz/src/hb-cff2-interp-cs.hh b/gfx/harfbuzz/src/hb-cff2-interp-cs.hh index 915b10cf39..55b1d3bf8d 100644 --- a/gfx/harfbuzz/src/hb-cff2-interp-cs.hh +++ b/gfx/harfbuzz/src/hb-cff2-interp-cs.hh @@ -168,7 +168,7 @@ struct cff2_cs_interp_env_t : cs_interp_env_t<ELEM, CFF2Subrs> protected: const int *coords; unsigned int num_coords; - const CFF2VariationStore *varStore; + const CFF2ItemVariationStore *varStore; unsigned int region_count; unsigned int ivs; hb_vector_t<float> scalars; diff --git a/gfx/harfbuzz/src/hb-common.cc b/gfx/harfbuzz/src/hb-common.cc index 0c13c7d171..4b8bae4422 100644 --- a/gfx/harfbuzz/src/hb-common.cc +++ b/gfx/harfbuzz/src/hb-common.cc @@ -996,7 +996,7 @@ hb_feature_to_string (hb_feature_t *feature, if (feature->value > 1) { s[len++] = '='; - len += hb_max (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%u", feature->value)); + len += hb_max (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%" PRIu32, feature->value)); } assert (len < ARRAY_LENGTH (s)); len = hb_min (len, size - 1); diff --git a/gfx/harfbuzz/src/hb-common.h b/gfx/harfbuzz/src/hb-common.h index a9fe666b39..dfdefc627e 100644 --- a/gfx/harfbuzz/src/hb-common.h +++ b/gfx/harfbuzz/src/hb-common.h @@ -47,14 +47,10 @@ # endif /* !__cplusplus */ #endif -#if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || \ - defined (_sgi) || defined (__sun) || defined (sun) || \ - defined (__digital__) || defined (__HP_cc) -# include <inttypes.h> -#elif defined (_AIX) +#if defined (_AIX) # include <sys/inttypes.h> -#elif defined (_MSC_VER) && _MSC_VER < 1600 -/* VS 2010 (_MSC_VER 1600) has stdint.h */ +#elif defined (_MSC_VER) && _MSC_VER < 1800 +/* VS 2013 (_MSC_VER 1800) has inttypes.h */ typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; @@ -63,10 +59,8 @@ typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; -#elif defined (__KERNEL__) -# include <linux/types.h> #else -# include <stdint.h> +# include <inttypes.h> #endif #if defined(__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) diff --git a/gfx/harfbuzz/src/hb-cplusplus.hh b/gfx/harfbuzz/src/hb-cplusplus.hh index 531ef1b7c8..a640e192de 100644 --- a/gfx/harfbuzz/src/hb-cplusplus.hh +++ b/gfx/harfbuzz/src/hb-cplusplus.hh @@ -56,15 +56,15 @@ struct shared_ptr explicit shared_ptr (T *p = nullptr) : p (p) {} shared_ptr (const shared_ptr &o) : p (v::reference (o.p)) {} - shared_ptr (shared_ptr &&o) : p (o.p) { o.p = nullptr; } + shared_ptr (shared_ptr &&o) noexcept : p (o.p) { o.p = nullptr; } shared_ptr& operator = (const shared_ptr &o) { if (p != o.p) { destroy (); p = o.p; reference (); } return *this; } - shared_ptr& operator = (shared_ptr &&o) { v::destroy (p); p = o.p; o.p = nullptr; return *this; } + shared_ptr& operator = (shared_ptr &&o) noexcept { v::destroy (p); p = o.p; o.p = nullptr; return *this; } ~shared_ptr () { v::destroy (p); p = nullptr; } T* get() const { return p; } - void swap (shared_ptr &o) { std::swap (p, o.p); } - friend void swap (shared_ptr &a, shared_ptr &b) { std::swap (a.p, b.p); } + void swap (shared_ptr &o) 
noexcept { std::swap (p, o.p); } + friend void swap (shared_ptr &a, shared_ptr &b) noexcept { std::swap (a.p, b.p); } operator T * () const { return p; } T& operator * () const { return *get (); } @@ -98,16 +98,16 @@ struct unique_ptr explicit unique_ptr (T *p = nullptr) : p (p) {} unique_ptr (const unique_ptr &o) = delete; - unique_ptr (unique_ptr &&o) : p (o.p) { o.p = nullptr; } + unique_ptr (unique_ptr &&o) noexcept : p (o.p) { o.p = nullptr; } unique_ptr& operator = (const unique_ptr &o) = delete; - unique_ptr& operator = (unique_ptr &&o) { v::destroy (p); p = o.p; o.p = nullptr; return *this; } + unique_ptr& operator = (unique_ptr &&o) noexcept { v::destroy (p); p = o.p; o.p = nullptr; return *this; } ~unique_ptr () { v::destroy (p); p = nullptr; } T* get() const { return p; } T* release () { T* v = p; p = nullptr; return v; } - void swap (unique_ptr &o) { std::swap (p, o.p); } - friend void swap (unique_ptr &a, unique_ptr &b) { std::swap (a.p, b.p); } + void swap (unique_ptr &o) noexcept { std::swap (p, o.p); } + friend void swap (unique_ptr &a, unique_ptr &b) noexcept { std::swap (a.p, b.p); } operator T * () const { return p; } T& operator * () const { return *get (); } diff --git a/gfx/harfbuzz/src/hb-directwrite.cc b/gfx/harfbuzz/src/hb-directwrite.cc index 42764a244b..6c90265d0b 100644 --- a/gfx/harfbuzz/src/hb-directwrite.cc +++ b/gfx/harfbuzz/src/hb-directwrite.cc @@ -173,7 +173,7 @@ _hb_directwrite_shaper_face_data_create (hb_face_t *face) t_DWriteCreateFactory p_DWriteCreateFactory; -#if defined(__GNUC__) +#if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-function-type" #endif @@ -181,7 +181,7 @@ _hb_directwrite_shaper_face_data_create (hb_face_t *face) p_DWriteCreateFactory = (t_DWriteCreateFactory) GetProcAddress (data->dwrite_dll, "DWriteCreateFactory"); -#if defined(__GNUC__) +#if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic pop #endif diff --git a/gfx/harfbuzz/src/hb-font.hh b/gfx/harfbuzz/src/hb-font.hh index f503575c34..4c8190b0dd 100644 --- a/gfx/harfbuzz/src/hb-font.hh +++ b/gfx/harfbuzz/src/hb-font.hh @@ -651,7 +651,7 @@ struct hb_font_t { if (get_glyph_name (glyph, s, size)) return; - if (size && snprintf (s, size, "gid%u", glyph) < 0) + if (size && snprintf (s, size, "gid%" PRIu32, glyph) < 0) *s = '\0'; } diff --git a/gfx/harfbuzz/src/hb-ft.cc b/gfx/harfbuzz/src/hb-ft.cc index 955a9081e0..3de4a6d5d4 100644 --- a/gfx/harfbuzz/src/hb-ft.cc +++ b/gfx/harfbuzz/src/hb-ft.cc @@ -224,7 +224,7 @@ _hb_ft_hb_font_check_changed (hb_font_t *font, * * Sets the FT_Load_Glyph load flags for the specified #hb_font_t. * - * For more information, see + * For more information, see * <https://freetype.org/freetype2/docs/reference/ft2-glyph_retrieval.html#ft_load_xxx> * * This function works with #hb_font_t objects created by @@ -252,7 +252,7 @@ hb_ft_font_set_load_flags (hb_font_t *font, int load_flags) * * Fetches the FT_Load_Glyph load flags of the specified #hb_font_t. * - * For more information, see + * For more information, see * <https://freetype.org/freetype2/docs/reference/ft2-glyph_retrieval.html#ft_load_xxx> * * This function works with #hb_font_t objects created by @@ -1118,10 +1118,10 @@ _hb_ft_reference_table (hb_face_t *face HB_UNUSED, hb_tag_t tag, void *user_data * This variant of the function does not provide any life-cycle management. * * Most client programs should use hb_ft_face_create_referenced() - * (or, perhaps, hb_ft_face_create_cached()) instead. 
+ * (or, perhaps, hb_ft_face_create_cached()) instead. * * If you know you have valid reasons not to use hb_ft_face_create_referenced(), - * then it is the client program's responsibility to destroy @ft_face + * then it is the client program's responsibility to destroy @ft_face * after the #hb_face_t face object has been destroyed. * * Return value: (transfer full): the new #hb_face_t face object @@ -1215,7 +1215,7 @@ hb_ft_face_finalize (void *arg) hb_face_t * hb_ft_face_create_cached (FT_Face ft_face) { - if (unlikely (!ft_face->generic.data || ft_face->generic.finalizer != (FT_Generic_Finalizer) hb_ft_face_finalize)) + if (unlikely (!ft_face->generic.data || ft_face->generic.finalizer != hb_ft_face_finalize)) { if (ft_face->generic.finalizer) ft_face->generic.finalizer (ft_face); @@ -1241,13 +1241,13 @@ hb_ft_face_create_cached (FT_Face ft_face) * This variant of the function does not provide any life-cycle management. * * Most client programs should use hb_ft_font_create_referenced() - * instead. + * instead. * * If you know you have valid reasons not to use hb_ft_font_create_referenced(), - * then it is the client program's responsibility to destroy @ft_face + * then it is the client program's responsibility to destroy @ft_face * after the #hb_font_t font object has been destroyed. * - * HarfBuzz will use the @destroy callback on the #hb_font_t font object + * HarfBuzz will use the @destroy callback on the #hb_font_t font object * if it is supplied when you use this function. However, even if @destroy * is provided, it is the client program's responsibility to destroy @ft_face, * and it is the client program's responsibility to ensure that @ft_face is diff --git a/gfx/harfbuzz/src/hb-icu.cc b/gfx/harfbuzz/src/hb-icu.cc index e46401f7a6..3707ec30f8 100644 --- a/gfx/harfbuzz/src/hb-icu.cc +++ b/gfx/harfbuzz/src/hb-icu.cc @@ -93,15 +93,16 @@ hb_icu_script_to_script (UScriptCode script) UScriptCode hb_icu_script_from_script (hb_script_t script) { + UScriptCode out = USCRIPT_INVALID_CODE; + if (unlikely (script == HB_SCRIPT_INVALID)) - return USCRIPT_INVALID_CODE; + return out; - unsigned int numScriptCode = 1 + u_getIntPropertyMaxValue (UCHAR_SCRIPT); - for (unsigned int i = 0; i < numScriptCode; i++) - if (unlikely (hb_icu_script_to_script ((UScriptCode) i) == script)) - return (UScriptCode) i; + UErrorCode icu_err = U_ZERO_ERROR; + const unsigned char buf[5] = {HB_UNTAG (script), 0}; + uscript_getCode ((const char *) buf, &out, 1, &icu_err); - return USCRIPT_UNKNOWN; + return out; } diff --git a/gfx/harfbuzz/src/hb-limits.hh b/gfx/harfbuzz/src/hb-limits.hh index 25c1e71e13..7efc893eae 100644 --- a/gfx/harfbuzz/src/hb-limits.hh +++ b/gfx/harfbuzz/src/hb-limits.hh @@ -106,7 +106,7 @@ #endif #ifndef HB_COLRV1_MAX_EDGE_COUNT -#define HB_COLRV1_MAX_EDGE_COUNT 65536 +#define HB_COLRV1_MAX_EDGE_COUNT 2048 #endif diff --git a/gfx/harfbuzz/src/hb-map.hh b/gfx/harfbuzz/src/hb-map.hh index 45a02b830c..6521b1a41d 100644 --- a/gfx/harfbuzz/src/hb-map.hh +++ b/gfx/harfbuzz/src/hb-map.hh @@ -70,9 +70,9 @@ struct hb_hashmap_t alloc (o.population); hb_copy (o, *this); } - hb_hashmap_t (hb_hashmap_t&& o) : hb_hashmap_t () { hb_swap (*this, o); } + hb_hashmap_t (hb_hashmap_t&& o) noexcept : hb_hashmap_t () { hb_swap (*this, o); } hb_hashmap_t& operator= (const hb_hashmap_t& o) { reset (); alloc (o.population); hb_copy (o, *this); return *this; } - hb_hashmap_t& operator= (hb_hashmap_t&& o) { hb_swap (*this, o); return *this; } + hb_hashmap_t& operator= (hb_hashmap_t&& o) noexcept { hb_swap (*this, o); 
return *this; } hb_hashmap_t (std::initializer_list<hb_pair_t<K, V>> lst) : hb_hashmap_t () { @@ -137,26 +137,23 @@ struct hb_hashmap_t }; hb_object_header_t header; - unsigned int successful : 1; /* Allocations successful */ - unsigned int population : 31; /* Not including tombstones. */ + bool successful; /* Allocations successful */ + unsigned short max_chain_length; + unsigned int population; /* Not including tombstones. */ unsigned int occupancy; /* Including tombstones. */ unsigned int mask; unsigned int prime; - unsigned int max_chain_length; item_t *items; - friend void swap (hb_hashmap_t& a, hb_hashmap_t& b) + friend void swap (hb_hashmap_t& a, hb_hashmap_t& b) noexcept { if (unlikely (!a.successful || !b.successful)) return; - unsigned tmp = a.population; - a.population = b.population; - b.population = tmp; - //hb_swap (a.population, b.population); + hb_swap (a.max_chain_length, b.max_chain_length); + hb_swap (a.population, b.population); hb_swap (a.occupancy, b.occupancy); hb_swap (a.mask, b.mask); hb_swap (a.prime, b.prime); - hb_swap (a.max_chain_length, b.max_chain_length); hb_swap (a.items, b.items); } void init () @@ -164,10 +161,10 @@ struct hb_hashmap_t hb_object_init (this); successful = true; + max_chain_length = 0; population = occupancy = 0; mask = 0; prime = 0; - max_chain_length = 0; items = nullptr; } void fini () @@ -558,7 +555,7 @@ struct hb_map_t : hb_hashmap_t<hb_codepoint_t, ~hb_map_t () = default; hb_map_t () : hashmap () {} hb_map_t (const hb_map_t &o) : hashmap ((hashmap &) o) {} - hb_map_t (hb_map_t &&o) : hashmap (std::move ((hashmap &) o)) {} + hb_map_t (hb_map_t &&o) noexcept : hashmap (std::move ((hashmap &) o)) {} hb_map_t& operator= (const hb_map_t&) = default; hb_map_t& operator= (hb_map_t&&) = default; hb_map_t (std::initializer_list<hb_codepoint_pair_t> lst) : hashmap (lst) {} diff --git a/gfx/harfbuzz/src/hb-object.hh b/gfx/harfbuzz/src/hb-object.hh index e2c2c3394c..5cffe1666b 100644 --- a/gfx/harfbuzz/src/hb-object.hh +++ b/gfx/harfbuzz/src/hb-object.hh @@ -325,7 +325,7 @@ retry: hb_user_data_array_t *user_data = obj->header.user_data.get_acquire (); if (unlikely (!user_data)) { - user_data = (hb_user_data_array_t *) hb_calloc (sizeof (hb_user_data_array_t), 1); + user_data = (hb_user_data_array_t *) hb_calloc (1, sizeof (hb_user_data_array_t)); if (unlikely (!user_data)) return false; user_data->init (); diff --git a/gfx/harfbuzz/src/hb-open-type.hh b/gfx/harfbuzz/src/hb-open-type.hh index 6967bca3d4..9c11f14344 100644 --- a/gfx/harfbuzz/src/hb-open-type.hh +++ b/gfx/harfbuzz/src/hb-open-type.hh @@ -985,6 +985,13 @@ struct SortedArrayOf : ArrayOf<Type, LenType> return_trace (ret); } + SortedArrayOf* copy (hb_serialize_context_t *c) const + { + TRACE_SERIALIZE (this); + SortedArrayOf* out = reinterpret_cast<SortedArrayOf *> (ArrayOf<Type, LenType>::copy (c)); + return_trace (out); + } + template <typename T> Type &bsearch (const T &x, Type &not_found = Crap (Type)) { return *as_array ().bsearch (x, &not_found); } diff --git a/gfx/harfbuzz/src/hb-ot-cff-common.hh b/gfx/harfbuzz/src/hb-ot-cff-common.hh index 4fdba197ac..c7c3264c08 100644 --- a/gfx/harfbuzz/src/hb-ot-cff-common.hh +++ b/gfx/harfbuzz/src/hb-ot-cff-common.hh @@ -41,10 +41,21 @@ using namespace OT; using objidx_t = hb_serialize_context_t::objidx_t; using whence_t = hb_serialize_context_t::whence_t; -/* utility macro */ -template<typename Type> -static inline const Type& StructAtOffsetOrNull (const void *P, unsigned int offset) -{ return offset ?
StructAtOffset<Type> (P, offset) : Null (Type); } +/* CFF offsets can technically be negative */ +template<typename Type, typename ...Ts> +static inline const Type& StructAtOffsetOrNull (const void *P, int offset, hb_sanitize_context_t &sc, Ts&&... ds) +{ + if (!offset) return Null (Type); + + const char *p = (const char *) P + offset; + if (!sc.check_point (p)) return Null (Type); + + const Type &obj = *reinterpret_cast<const Type *> (p); + if (!obj.sanitize (&sc, std::forward<Ts> (ds)...)) return Null (Type); + + return obj; +} + struct code_pair_t { diff --git a/gfx/harfbuzz/src/hb-ot-cff1-table.hh b/gfx/harfbuzz/src/hb-ot-cff1-table.hh index c869e90554..1bbd463841 100644 --- a/gfx/harfbuzz/src/hb-ot-cff1-table.hh +++ b/gfx/harfbuzz/src/hb-ot-cff1-table.hh @@ -763,9 +763,9 @@ struct cff1_top_dict_values_t : top_dict_values_t<cff1_top_dict_val_t> unsigned int ros_supplement; unsigned int cidCount; - unsigned int EncodingOffset; - unsigned int CharsetOffset; - unsigned int FDSelectOffset; + int EncodingOffset; + int CharsetOffset; + int FDSelectOffset; table_info_t privateDictInfo; }; @@ -821,24 +821,24 @@ struct cff1_top_dict_opset_t : top_dict_opset_t<cff1_top_dict_val_t> break; case OpCode_Encoding: - dictval.EncodingOffset = env.argStack.pop_uint (); + dictval.EncodingOffset = env.argStack.pop_int (); env.clear_args (); if (unlikely (dictval.EncodingOffset == 0)) return; break; case OpCode_charset: - dictval.CharsetOffset = env.argStack.pop_uint (); + dictval.CharsetOffset = env.argStack.pop_int (); env.clear_args (); if (unlikely (dictval.CharsetOffset == 0)) return; break; case OpCode_FDSelect: - dictval.FDSelectOffset = env.argStack.pop_uint (); + dictval.FDSelectOffset = env.argStack.pop_int (); env.clear_args (); break; case OpCode_Private: - dictval.privateDictInfo.offset = env.argStack.pop_uint (); + dictval.privateDictInfo.offset = env.argStack.pop_int (); dictval.privateDictInfo.size = env.argStack.pop_uint (); env.clear_args (); break; @@ -913,7 +913,7 @@ struct cff1_private_dict_values_base_t : dict_values_t<VAL> } void fini () { dict_values_t<VAL>::fini (); } - unsigned int subrsOffset; + int subrsOffset; const CFF1Subrs *localSubrs; }; @@ -948,7 +948,7 @@ struct cff1_private_dict_opset_t : dict_opset_t env.clear_args (); break; case OpCode_Subrs: - dictval.subrsOffset = env.argStack.pop_uint (); + dictval.subrsOffset = env.argStack.pop_int (); env.clear_args (); break; @@ -990,7 +990,7 @@ struct cff1_private_dict_opset_subset_t : dict_opset_t break; case OpCode_Subrs: - dictval.subrsOffset = env.argStack.pop_uint (); + dictval.subrsOffset = env.argStack.pop_int (); env.clear_args (); break; @@ -1090,8 +1090,8 @@ struct cff1 goto fail; hb_barrier (); - topDictIndex = &StructAtOffset<CFF1TopDictIndex> (nameIndex, nameIndex->get_size ()); - if ((topDictIndex == &Null (CFF1TopDictIndex)) || !topDictIndex->sanitize (&sc) || (topDictIndex->count == 0)) + topDictIndex = &StructAtOffsetOrNull<CFF1TopDictIndex> (nameIndex, nameIndex->get_size (), sc); + if (topDictIndex == &Null (CFF1TopDictIndex) || (topDictIndex->count == 0)) goto fail; hb_barrier (); @@ -1108,20 +1108,18 @@ struct cff1 charset = &Null (Charset); else { - charset = &StructAtOffsetOrNull<Charset> (cff, topDict.CharsetOffset); - if (unlikely ((charset == &Null (Charset)) || !charset->sanitize (&sc, &num_charset_entries))) goto fail; - hb_barrier (); + charset = &StructAtOffsetOrNull<Charset> (cff, topDict.CharsetOffset, sc, &num_charset_entries); + if (unlikely (charset == &Null (Charset))) goto fail; } fdCount = 
1; if (is_CID ()) { - fdArray = &StructAtOffsetOrNull<CFF1FDArray> (cff, topDict.FDArrayOffset); - fdSelect = &StructAtOffsetOrNull<CFF1FDSelect> (cff, topDict.FDSelectOffset); - if (unlikely ((fdArray == &Null (CFF1FDArray)) || !fdArray->sanitize (&sc) || - (fdSelect == &Null (CFF1FDSelect)) || !fdSelect->sanitize (&sc, fdArray->count))) + fdArray = &StructAtOffsetOrNull<CFF1FDArray> (cff, topDict.FDArrayOffset, sc); + fdSelect = &StructAtOffsetOrNull<CFF1FDSelect> (cff, topDict.FDSelectOffset, sc, fdArray->count); + if (unlikely (fdArray == &Null (CFF1FDArray) || + fdSelect == &Null (CFF1FDSelect))) goto fail; - hb_barrier (); fdCount = fdArray->count; } @@ -1140,27 +1138,19 @@ struct cff1 { if (!is_predef_encoding ()) { - encoding = &StructAtOffsetOrNull<Encoding> (cff, topDict.EncodingOffset); - if (unlikely ((encoding == &Null (Encoding)) || !encoding->sanitize (&sc))) goto fail; - hb_barrier (); + encoding = &StructAtOffsetOrNull<Encoding> (cff, topDict.EncodingOffset, sc); + if (unlikely (encoding == &Null (Encoding))) goto fail; } } - stringIndex = &StructAtOffset<CFF1StringIndex> (topDictIndex, topDictIndex->get_size ()); - if ((stringIndex == &Null (CFF1StringIndex)) || !stringIndex->sanitize (&sc)) + stringIndex = &StructAtOffsetOrNull<CFF1StringIndex> (topDictIndex, topDictIndex->get_size (), sc); + if (stringIndex == &Null (CFF1StringIndex)) goto fail; - hb_barrier (); - globalSubrs = &StructAtOffset<CFF1Subrs> (stringIndex, stringIndex->get_size ()); - if ((globalSubrs != &Null (CFF1Subrs)) && !globalSubrs->sanitize (&sc)) + globalSubrs = &StructAtOffsetOrNull<CFF1Subrs> (stringIndex, stringIndex->get_size (), sc); + charStrings = &StructAtOffsetOrNull<CFF1CharStrings> (cff, topDict.charStringsOffset, sc); + if (charStrings == &Null (CFF1CharStrings)) goto fail; - hb_barrier (); - - charStrings = &StructAtOffsetOrNull<CFF1CharStrings> (cff, topDict.charStringsOffset); - - if ((charStrings == &Null (CFF1CharStrings)) || unlikely (!charStrings->sanitize (&sc))) - goto fail; - hb_barrier (); num_glyphs = charStrings->count; if (num_glyphs != sc.get_num_glyphs ()) @@ -1188,19 +1178,13 @@ struct cff1 font->init (); if (unlikely (!font_interp.interpret (*font))) goto fail; PRIVDICTVAL *priv = &privateDicts[i]; - const hb_ubytes_t privDictStr = StructAtOffset<UnsizedByteStr> (cff, font->privateDictInfo.offset).as_ubytes (font->privateDictInfo.size); - if (unlikely (!privDictStr.sanitize (&sc))) goto fail; - hb_barrier (); + const hb_ubytes_t privDictStr = StructAtOffsetOrNull<UnsizedByteStr> (cff, font->privateDictInfo.offset, sc, font->privateDictInfo.size).as_ubytes (font->privateDictInfo.size); num_interp_env_t env2 (privDictStr); dict_interpreter_t<PRIVOPSET, PRIVDICTVAL> priv_interp (env2); priv->init (); if (unlikely (!priv_interp.interpret (*priv))) goto fail; - priv->localSubrs = &StructAtOffsetOrNull<CFF1Subrs> (&privDictStr, priv->subrsOffset); - if (priv->localSubrs != &Null (CFF1Subrs) && - unlikely (!priv->localSubrs->sanitize (&sc))) - goto fail; - hb_barrier (); + priv->localSubrs = &StructAtOffsetOrNull<CFF1Subrs> (&privDictStr, priv->subrsOffset, sc); } } else /* non-CID */ @@ -1208,18 +1192,13 @@ struct cff1 cff1_top_dict_values_t *font = &topDict; PRIVDICTVAL *priv = &privateDicts[0]; - const hb_ubytes_t privDictStr = StructAtOffset<UnsizedByteStr> (cff, font->privateDictInfo.offset).as_ubytes (font->privateDictInfo.size); - if (unlikely (!privDictStr.sanitize (&sc))) goto fail; - hb_barrier (); + const hb_ubytes_t privDictStr = 
StructAtOffsetOrNull<UnsizedByteStr> (cff, font->privateDictInfo.offset, sc, font->privateDictInfo.size).as_ubytes (font->privateDictInfo.size); num_interp_env_t env (privDictStr); dict_interpreter_t<PRIVOPSET, PRIVDICTVAL> priv_interp (env); priv->init (); if (unlikely (!priv_interp.interpret (*priv))) goto fail; - priv->localSubrs = &StructAtOffsetOrNull<CFF1Subrs> (&privDictStr, priv->subrsOffset); - if (priv->localSubrs != &Null (CFF1Subrs) && - unlikely (!priv->localSubrs->sanitize (&sc))) - goto fail; + priv->localSubrs = &StructAtOffsetOrNull<CFF1Subrs> (&privDictStr, priv->subrsOffset, sc); hb_barrier (); } @@ -1437,7 +1416,7 @@ struct cff1 hb_sorted_vector_t<gname_t> *names = glyph_names.get_acquire (); if (unlikely (!names)) { - names = (hb_sorted_vector_t<gname_t> *) hb_calloc (sizeof (hb_sorted_vector_t<gname_t>), 1); + names = (hb_sorted_vector_t<gname_t> *) hb_calloc (1, sizeof (hb_sorted_vector_t<gname_t>)); if (likely (names)) { names->init (); diff --git a/gfx/harfbuzz/src/hb-ot-cff2-table.hh b/gfx/harfbuzz/src/hb-ot-cff2-table.hh index 652748b737..4b3bdc9315 100644 --- a/gfx/harfbuzz/src/hb-ot-cff2-table.hh +++ b/gfx/harfbuzz/src/hb-ot-cff2-table.hh @@ -111,7 +111,7 @@ struct CFF2FDSelect DEFINE_SIZE_MIN (2); }; -struct CFF2VariationStore +struct CFF2ItemVariationStore { bool sanitize (hb_sanitize_context_t *c) const { @@ -122,11 +122,11 @@ struct CFF2VariationStore varStore.sanitize (c)); } - bool serialize (hb_serialize_context_t *c, const CFF2VariationStore *varStore) + bool serialize (hb_serialize_context_t *c, const CFF2ItemVariationStore *varStore) { TRACE_SERIALIZE (this); unsigned int size_ = varStore->get_size (); - CFF2VariationStore *dest = c->allocate_size<CFF2VariationStore> (size_); + CFF2ItemVariationStore *dest = c->allocate_size<CFF2ItemVariationStore> (size_); if (unlikely (!dest)) return_trace (false); hb_memcpy (dest, varStore, size_); return_trace (true); @@ -135,9 +135,9 @@ struct CFF2VariationStore unsigned int get_size () const { return HBUINT16::static_size + size; } HBUINT16 size; - VariationStore varStore; + ItemVariationStore varStore; - DEFINE_SIZE_MIN (2 + VariationStore::min_size); + DEFINE_SIZE_MIN (2 + ItemVariationStore::min_size); }; struct cff2_top_dict_values_t : top_dict_values_t<> @@ -150,8 +150,8 @@ struct cff2_top_dict_values_t : top_dict_values_t<> } void fini () { top_dict_values_t<>::fini (); } - unsigned int vstoreOffset; - unsigned int FDSelectOffset; + int vstoreOffset; + int FDSelectOffset; }; struct cff2_top_dict_opset_t : top_dict_opset_t<> @@ -169,11 +169,11 @@ struct cff2_top_dict_opset_t : top_dict_opset_t<> break; case OpCode_vstore: - dictval.vstoreOffset = env.argStack.pop_uint (); + dictval.vstoreOffset = env.argStack.pop_int (); env.clear_args (); break; case OpCode_FDSelect: - dictval.FDSelectOffset = env.argStack.pop_uint (); + dictval.FDSelectOffset = env.argStack.pop_int (); env.clear_args (); break; @@ -241,7 +241,7 @@ struct cff2_private_dict_values_base_t : dict_values_t<VAL> } void fini () { dict_values_t<VAL>::fini (); } - unsigned int subrsOffset; + int subrsOffset; const CFF2Subrs *localSubrs; unsigned int ivs; }; @@ -295,7 +295,7 @@ struct cff2_private_dict_opset_t : dict_opset_t env.clear_args (); break; case OpCode_Subrs: - dictval.subrsOffset = env.argStack.pop_uint (); + dictval.subrsOffset = env.argStack.pop_int (); env.clear_args (); break; case OpCode_vsindexdict: @@ -344,7 +344,7 @@ struct cff2_private_dict_opset_subset_t : dict_opset_t return; case OpCode_Subrs: - dictval.subrsOffset = 
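These CFF2 dict hunks make the same signed-offset switch as the CFF1 ones: every stored offset now comes from pop_int() instead of pop_uint(). A hedged illustration of why the sign is worth keeping (values are made up):

  int raw = -16;                       /* offset operand as decoded from the DICT */
  unsigned wrapped = (unsigned) raw;   /* 4294967280 -- far past any real blob end */
  /* pop_int () keeps -16, so a genuinely backward offset can still resolve to a
   * spot inside the blob, while StructAtOffsetOrNull's check_point () rejects
   * anything that falls outside it; the old unsigned read could only ever
   * produce the huge wrapped value. */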
env.argStack.pop_uint (); + dictval.subrsOffset = env.argStack.pop_int (); env.clear_args (); break; @@ -426,18 +426,15 @@ struct cff2 if (unlikely (!top_interp.interpret (topDict))) goto fail; } - globalSubrs = &StructAtOffset<CFF2Subrs> (cff2, cff2->topDict + cff2->topDictSize); - varStore = &StructAtOffsetOrNull<CFF2VariationStore> (cff2, topDict.vstoreOffset); - charStrings = &StructAtOffsetOrNull<CFF2CharStrings> (cff2, topDict.charStringsOffset); - fdArray = &StructAtOffsetOrNull<CFF2FDArray> (cff2, topDict.FDArrayOffset); - fdSelect = &StructAtOffsetOrNull<CFF2FDSelect> (cff2, topDict.FDSelectOffset); - - if (((varStore != &Null (CFF2VariationStore)) && unlikely (!varStore->sanitize (&sc))) || - (charStrings == &Null (CFF2CharStrings)) || unlikely (!charStrings->sanitize (&sc)) || - (globalSubrs == &Null (CFF2Subrs)) || unlikely (!globalSubrs->sanitize (&sc)) || - (fdArray == &Null (CFF2FDArray)) || unlikely (!fdArray->sanitize (&sc)) || - !hb_barrier () || - (((fdSelect != &Null (CFF2FDSelect)) && unlikely (!fdSelect->sanitize (&sc, fdArray->count))))) + globalSubrs = &StructAtOffsetOrNull<CFF2Subrs> (cff2, cff2->topDict + cff2->topDictSize, sc); + varStore = &StructAtOffsetOrNull<CFF2ItemVariationStore> (cff2, topDict.vstoreOffset, sc); + charStrings = &StructAtOffsetOrNull<CFF2CharStrings> (cff2, topDict.charStringsOffset, sc); + fdArray = &StructAtOffsetOrNull<CFF2FDArray> (cff2, topDict.FDArrayOffset, sc); + fdSelect = &StructAtOffsetOrNull<CFF2FDSelect> (cff2, topDict.FDSelectOffset, sc, fdArray->count); + + if (charStrings == &Null (CFF2CharStrings) || + globalSubrs == &Null (CFF2Subrs) || + fdArray == &Null (CFF2FDArray)) goto fail; num_glyphs = charStrings->count; @@ -462,19 +459,13 @@ struct cff2 font->init (); if (unlikely (!font_interp.interpret (*font))) goto fail; - const hb_ubytes_t privDictStr = StructAtOffsetOrNull<UnsizedByteStr> (cff2, font->privateDictInfo.offset).as_ubytes (font->privateDictInfo.size); - if (unlikely (!privDictStr.sanitize (&sc))) goto fail; - hb_barrier (); + const hb_ubytes_t privDictStr = StructAtOffsetOrNull<UnsizedByteStr> (cff2, font->privateDictInfo.offset, sc, font->privateDictInfo.size).as_ubytes (font->privateDictInfo.size); cff2_priv_dict_interp_env_t env2 (privDictStr); dict_interpreter_t<PRIVOPSET, PRIVDICTVAL, cff2_priv_dict_interp_env_t> priv_interp (env2); privateDicts[i].init (); if (unlikely (!priv_interp.interpret (privateDicts[i]))) goto fail; - privateDicts[i].localSubrs = &StructAtOffsetOrNull<CFF2Subrs> (&privDictStr[0], privateDicts[i].subrsOffset); - if (privateDicts[i].localSubrs != &Null (CFF2Subrs) && - unlikely (!privateDicts[i].localSubrs->sanitize (&sc))) - goto fail; - hb_barrier (); + privateDicts[i].localSubrs = &StructAtOffsetOrNull<CFF2Subrs> (&privDictStr[0], privateDicts[i].subrsOffset, sc); } return; @@ -509,7 +500,7 @@ struct cff2 hb_blob_t *blob = nullptr; cff2_top_dict_values_t topDict; const CFF2Subrs *globalSubrs = nullptr; - const CFF2VariationStore *varStore = nullptr; + const CFF2ItemVariationStore *varStore = nullptr; const CFF2CharStrings *charStrings = nullptr; const CFF2FDArray *fdArray = nullptr; const CFF2FDSelect *fdSelect = nullptr; diff --git a/gfx/harfbuzz/src/hb-ot-cmap-table.hh b/gfx/harfbuzz/src/hb-ot-cmap-table.hh index e2e2581855..64d2b13880 100644 --- a/gfx/harfbuzz/src/hb-ot-cmap-table.hh +++ b/gfx/harfbuzz/src/hb-ot-cmap-table.hh @@ -41,6 +41,30 @@ namespace OT { +static inline uint8_t unicode_to_macroman (hb_codepoint_t u) +{ + uint16_t mapping[] = { + 0x00C4, 0x00C5, 0x00C7, 
0x00C9, 0x00D1, 0x00D6, 0x00DC, 0x00E1, + 0x00E0, 0x00E2, 0x00E4, 0x00E3, 0x00E5, 0x00E7, 0x00E9, 0x00E8, + 0x00EA, 0x00EB, 0x00ED, 0x00EC, 0x00EE, 0x00EF, 0x00F1, 0x00F3, + 0x00F2, 0x00F4, 0x00F6, 0x00F5, 0x00FA, 0x00F9, 0x00FB, 0x00FC, + 0x2020, 0x00B0, 0x00A2, 0x00A3, 0x00A7, 0x2022, 0x00B6, 0x00DF, + 0x00AE, 0x00A9, 0x2122, 0x00B4, 0x00A8, 0x2260, 0x00C6, 0x00D8, + 0x221E, 0x00B1, 0x2264, 0x2265, 0x00A5, 0x00B5, 0x2202, 0x2211, + 0x220F, 0x03C0, 0x222B, 0x00AA, 0x00BA, 0x03A9, 0x00E6, 0x00F8, + 0x00BF, 0x00A1, 0x00AC, 0x221A, 0x0192, 0x2248, 0x2206, 0x00AB, + 0x00BB, 0x2026, 0x00A0, 0x00C0, 0x00C3, 0x00D5, 0x0152, 0x0153, + 0x2013, 0x2014, 0x201C, 0x201D, 0x2018, 0x2019, 0x00F7, 0x25CA, + 0x00FF, 0x0178, 0x2044, 0x20AC, 0x2039, 0x203A, 0xFB01, 0xFB02, + 0x2021, 0x00B7, 0x201A, 0x201E, 0x2030, 0x00C2, 0x00CA, 0x00C1, + 0x00CB, 0x00C8, 0x00CD, 0x00CE, 0x00CF, 0x00CC, 0x00D3, 0x00D4, + 0xF8FF, 0x00D2, 0x00DA, 0x00DB, 0x00D9, 0x0131, 0x02C6, 0x02DC, + 0x00AF, 0x02D8, 0x02D9, 0x02DA, 0x00B8, 0x02DD, 0x02DB, 0x02C7 + }; + uint16_t *c = hb_bsearch (u, mapping, ARRAY_LENGTH (mapping), sizeof (mapping[0]), + _hb_cmp_operator<uint16_t, uint16_t>); + return c ? (c - mapping) + 0x7F : 0; +} struct CmapSubtableFormat0 { @@ -1465,8 +1489,11 @@ struct EncodingRecord int ret; ret = platformID.cmp (other.platformID); if (ret) return ret; - ret = encodingID.cmp (other.encodingID); - if (ret) return ret; + if (other.encodingID != 0xFFFF) + { + ret = encodingID.cmp (other.encodingID); + if (ret) return ret; + } return 0; } @@ -1814,9 +1841,13 @@ struct cmap c->plan)); } - const CmapSubtable *find_best_subtable (bool *symbol = nullptr) const + const CmapSubtable *find_best_subtable (bool *symbol = nullptr, + bool *mac = nullptr, + bool *macroman = nullptr) const { if (symbol) *symbol = false; + if (mac) *mac = false; + if (macroman) *macroman = false; const CmapSubtable *subtable; @@ -1841,6 +1872,20 @@ struct cmap if ((subtable = this->find_subtable (0, 1))) return subtable; if ((subtable = this->find_subtable (0, 0))) return subtable; + /* MacRoman subtable. */ + if ((subtable = this->find_subtable (1, 0))) + { + if (mac) *mac = true; + if (macroman) *macroman = true; + return subtable; + } + /* Any other Mac subtable; we just map ASCII for these. */ + if ((subtable = this->find_subtable (1, 0xFFFF))) + { + if (mac) *mac = true; + return subtable; + } + /* Meh. */ return &Null (CmapSubtable); } @@ -1852,8 +1897,8 @@ struct cmap accelerator_t (hb_face_t *face) { this->table = hb_sanitize_context_t ().reference_table<cmap> (face); - bool symbol; - this->subtable = table->find_best_subtable (&symbol); + bool symbol, mac, macroman; + this->subtable = table->find_best_subtable (&symbol, &mac, ¯oman); this->subtable_uvs = &Null (CmapSubtableFormat14); { const CmapSubtable *st = table->find_subtable (0, 5); @@ -1862,6 +1907,7 @@ struct cmap } this->get_glyph_data = subtable; +#ifndef HB_NO_CMAP_LEGACY_SUBTABLES if (unlikely (symbol)) { switch ((unsigned) face->table.OS2->get_font_page ()) { @@ -1881,7 +1927,16 @@ struct cmap break; } } + else if (unlikely (macroman)) + { + this->get_glyph_funcZ = get_glyph_from_macroman<CmapSubtable>; + } + else if (unlikely (mac)) + { + this->get_glyph_funcZ = get_glyph_from_ascii<CmapSubtable>; + } else +#endif { switch (subtable->u.format) { /* Accelerate format 4 and format 12. 
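The EncodingRecord::cmp change above treats an encodingID of 0xFFFF in the search key as a wildcard, which is what lets find_best_subtable() ask for "any Macintosh subtable" via find_subtable (1, 0xFFFF). A sketch of that lookup, reusing the key/bsearch pattern already present in this table (illustrative; the binary search returns whichever matching record it lands on, not necessarily the first):

  EncodingRecord key;
  key.platformID = 1;       /* Macintosh */
  key.encodingID = 0xFFFF;  /* wildcard: cmp () skips the encodingID comparison */
  const EncodingRecord *rec = encodingRecord.as_array ().bsearch (key);
  /* rec is a platform-1 record of any encoding, or nullptr if none exists. */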
*/ @@ -1924,7 +1979,7 @@ struct cmap hb_codepoint_t *glyph, cache_t *cache = nullptr) const { - if (unlikely (!this->get_glyph_funcZ)) return 0; + if (unlikely (!this->get_glyph_funcZ)) return false; return _cached_get (unicode, glyph, cache); } @@ -2006,6 +2061,28 @@ struct cmap return false; } + template <typename Type> + HB_INTERNAL static bool get_glyph_from_ascii (const void *obj, + hb_codepoint_t codepoint, + hb_codepoint_t *glyph) + { + const Type *typed_obj = (const Type *) obj; + return codepoint < 0x80 && typed_obj->get_glyph (codepoint, glyph); + } + + template <typename Type> + HB_INTERNAL static bool get_glyph_from_macroman (const void *obj, + hb_codepoint_t codepoint, + hb_codepoint_t *glyph) + { + if (get_glyph_from_ascii<Type> (obj, codepoint, glyph)) + return true; + + const Type *typed_obj = (const Type *) obj; + unsigned c = unicode_to_macroman (codepoint); + return c && typed_obj->get_glyph (c, glyph); + } + private: hb_nonnull_ptr_t<const CmapSubtable> subtable; hb_nonnull_ptr_t<const CmapSubtableFormat14> subtable_uvs; @@ -2035,28 +2112,6 @@ struct cmap return &(this+result.subtable); } - const EncodingRecord *find_encodingrec (unsigned int platform_id, - unsigned int encoding_id) const - { - EncodingRecord key; - key.platformID = platform_id; - key.encodingID = encoding_id; - - return encodingRecord.as_array ().bsearch (key); - } - - bool find_subtable (unsigned format) const - { - auto it = - + hb_iter (encodingRecord) - | hb_map (&EncodingRecord::subtable) - | hb_map (hb_add (this)) - | hb_filter ([&] (const CmapSubtable& _) { return _.u.format == format; }) - ; - - return it.len (); - } - public: bool sanitize (hb_sanitize_context_t *c) const diff --git a/gfx/harfbuzz/src/hb-ot-font.cc b/gfx/harfbuzz/src/hb-ot-font.cc index b3677c6a4c..1da869d697 100644 --- a/gfx/harfbuzz/src/hb-ot-font.cc +++ b/gfx/harfbuzz/src/hb-ot-font.cc @@ -208,12 +208,12 @@ hb_ot_get_glyph_h_advances (hb_font_t* font, void* font_data, #if !defined(HB_NO_VAR) && !defined(HB_NO_OT_FONT_ADVANCE_CACHE) const OT::HVAR &HVAR = *hmtx.var_table; - const OT::VariationStore &varStore = &HVAR + HVAR.varStore; - OT::VariationStore::cache_t *varStore_cache = font->num_coords * count >= 128 ? varStore.create_cache () : nullptr; + const OT::ItemVariationStore &varStore = &HVAR + HVAR.varStore; + OT::ItemVariationStore::cache_t *varStore_cache = font->num_coords * count >= 128 ? varStore.create_cache () : nullptr; bool use_cache = font->num_coords; #else - OT::VariationStore::cache_t *varStore_cache = nullptr; + OT::ItemVariationStore::cache_t *varStore_cache = nullptr; bool use_cache = false; #endif @@ -277,7 +277,7 @@ hb_ot_get_glyph_h_advances (hb_font_t* font, void* font_data, } #if !defined(HB_NO_VAR) && !defined(HB_NO_OT_FONT_ADVANCE_CACHE) - OT::VariationStore::destroy_cache (varStore_cache); + OT::ItemVariationStore::destroy_cache (varStore_cache); #endif if (font->x_strength && !font->embolden_in_place) @@ -313,10 +313,10 @@ hb_ot_get_glyph_v_advances (hb_font_t* font, void* font_data, { #if !defined(HB_NO_VAR) && !defined(HB_NO_OT_FONT_ADVANCE_CACHE) const OT::VVAR &VVAR = *vmtx.var_table; - const OT::VariationStore &varStore = &VVAR + VVAR.varStore; - OT::VariationStore::cache_t *varStore_cache = font->num_coords ? varStore.create_cache () : nullptr; + const OT::ItemVariationStore &varStore = &VVAR + VVAR.varStore; + OT::ItemVariationStore::cache_t *varStore_cache = font->num_coords ? 
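The two static thunks added above are what the accelerator's get_glyph_funcZ pointer is set to in the legacy-Mac branches earlier in this hunk. A rough sketch of the resulting internal dispatch (normally reached via the public nominal-glyph API; accel stands for a cmap::accelerator_t and is only for illustration):

  hb_codepoint_t gid = 0;
  /* ASCII passes straight through get_glyph_from_ascii (); anything above 0x7F
   * is first mapped through the 128-entry unicode_to_macroman () table and then
   * looked up in the (1,0) subtable by its MacRoman byte. */
  if (accel.get_glyph_funcZ &&
      accel.get_glyph_funcZ (accel.get_glyph_data, 0x00E9 /* e-acute */, &gid))
    { /* gid holds whatever the Mac subtable maps that MacRoman code to */ }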
varStore.create_cache () : nullptr; #else - OT::VariationStore::cache_t *varStore_cache = nullptr; + OT::ItemVariationStore::cache_t *varStore_cache = nullptr; #endif for (unsigned int i = 0; i < count; i++) @@ -327,7 +327,7 @@ hb_ot_get_glyph_v_advances (hb_font_t* font, void* font_data, } #if !defined(HB_NO_VAR) && !defined(HB_NO_OT_FONT_ADVANCE_CACHE) - OT::VariationStore::destroy_cache (varStore_cache); + OT::ItemVariationStore::destroy_cache (varStore_cache); #endif } else diff --git a/gfx/harfbuzz/src/hb-ot-hmtx-table.hh b/gfx/harfbuzz/src/hb-ot-hmtx-table.hh index 89640b43f1..48bd536121 100644 --- a/gfx/harfbuzz/src/hb-ot-hmtx-table.hh +++ b/gfx/harfbuzz/src/hb-ot-hmtx-table.hh @@ -145,6 +145,29 @@ struct hmtxvmtx table->minTrailingBearing = min_rsb; table->maxExtent = max_extent; } + + if (T::is_horizontal) + { + const auto &OS2 = *c->plan->source->table.OS2; + if (OS2.has_data () && + table->ascender == OS2.sTypoAscender && + table->descender == OS2.sTypoDescender && + table->lineGap == OS2.sTypoLineGap) + { + table->ascender = static_cast<int> (roundf (OS2.sTypoAscender + + MVAR.get_var (HB_OT_METRICS_TAG_HORIZONTAL_ASCENDER, + c->plan->normalized_coords.arrayZ, + c->plan->normalized_coords.length))); + table->descender = static_cast<int> (roundf (OS2.sTypoDescender + + MVAR.get_var (HB_OT_METRICS_TAG_HORIZONTAL_DESCENDER, + c->plan->normalized_coords.arrayZ, + c->plan->normalized_coords.length))); + table->lineGap = static_cast<int> (roundf (OS2.sTypoLineGap + + MVAR.get_var (HB_OT_METRICS_TAG_HORIZONTAL_LINE_GAP, + c->plan->normalized_coords.arrayZ, + c->plan->normalized_coords.length))); + } + } } #endif @@ -374,7 +397,7 @@ struct hmtxvmtx unsigned get_advance_with_var_unscaled (hb_codepoint_t glyph, hb_font_t *font, - VariationStore::cache_t *store_cache = nullptr) const + ItemVariationStore::cache_t *store_cache = nullptr) const { unsigned int advance = get_advance_without_var_unscaled (glyph); diff --git a/gfx/harfbuzz/src/hb-ot-layout-base-table.hh b/gfx/harfbuzz/src/hb-ot-layout-base-table.hh index a23b6377d1..0278399069 100644 --- a/gfx/harfbuzz/src/hb-ot-layout-base-table.hh +++ b/gfx/harfbuzz/src/hb-ot-layout-base-table.hh @@ -46,6 +46,12 @@ struct BaseCoordFormat1 return HB_DIRECTION_IS_HORIZONTAL (direction) ? font->em_scale_y (coordinate) : font->em_scale_x (coordinate); } + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + return_trace ((bool) c->serializer->embed (*this)); + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -67,6 +73,17 @@ struct BaseCoordFormat2 return HB_DIRECTION_IS_HORIZONTAL (direction) ? 
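The new hmtx block above re-derives the typo metrics during instancing when hhea was simply mirroring OS/2: each value becomes the static OS/2 value plus the MVAR delta at the instance's normalized coordinates, rounded. A hedged restatement with a hypothetical helper (coords/len stand for the plan's normalized coordinates; roundf comes from <math.h>):

  static int instanced_metric (int static_value, hb_tag_t mvar_tag,
                               const OT::MVAR &MVAR, const int *coords, unsigned len)
  {
    return (int) roundf (static_value + MVAR.get_var (mvar_tag, coords, len));
  }
  /* e.g. table->ascender = instanced_metric (OS2.sTypoAscender,
   *                                          HB_OT_METRICS_TAG_HORIZONTAL_ASCENDER,
   *                                          MVAR, coords, len); */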
font->em_scale_y (coordinate) : font->em_scale_x (coordinate); } + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->embed (*this); + if (unlikely (!out)) return_trace (false); + + return_trace (c->serializer->check_assign (out->referenceGlyph, + c->plan->glyph_map->get (referenceGlyph), + HB_SERIALIZE_ERROR_INT_OVERFLOW)); + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -86,7 +103,7 @@ struct BaseCoordFormat2 struct BaseCoordFormat3 { hb_position_t get_coord (hb_font_t *font, - const VariationStore &var_store, + const ItemVariationStore &var_store, hb_direction_t direction) const { const Device &device = this+deviceTable; @@ -96,6 +113,23 @@ struct BaseCoordFormat3 : font->em_scale_x (coordinate) + device.get_x_delta (font, var_store); } + void collect_variation_indices (hb_set_t& varidx_set /* OUT */) const + { + unsigned varidx = (this+deviceTable).get_variation_index (); + varidx_set.add (varidx); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->embed (*this); + if (unlikely (!out)) return_trace (false); + + return_trace (out->deviceTable.serialize_copy (c->serializer, deviceTable, + this, 0, + hb_serialize_context_t::Head, + &c->plan->base_variation_idx_map)); + } bool sanitize (hb_sanitize_context_t *c) const { @@ -120,7 +154,7 @@ struct BaseCoord bool has_data () const { return u.format; } hb_position_t get_coord (hb_font_t *font, - const VariationStore &var_store, + const ItemVariationStore &var_store, hb_direction_t direction) const { switch (u.format) { @@ -131,6 +165,27 @@ struct BaseCoord } } + void collect_variation_indices (hb_set_t& varidx_set /* OUT */) const + { + switch (u.format) { + case 3: u.format3.collect_variation_indices (varidx_set); + default:return; + } + } + + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... 
ds) const + { + if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value (); + TRACE_DISPATCH (this, u.format); + switch (u.format) { + case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); + case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...)); + case 3: return_trace (c->dispatch (u.format3, std::forward<Ts> (ds)...)); + default:return_trace (c->default_return_value ()); + } + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -161,12 +216,37 @@ struct FeatMinMaxRecord bool has_data () const { return tag; } + hb_tag_t get_feature_tag () const { return tag; } + void get_min_max (const BaseCoord **min, const BaseCoord **max) const { if (likely (min)) *min = &(this+minCoord); if (likely (max)) *max = &(this+maxCoord); } + void collect_variation_indices (const hb_subset_plan_t* plan, + const void *base, + hb_set_t& varidx_set /* OUT */) const + { + if (!plan->layout_features.has (tag)) + return; + + (base+minCoord).collect_variation_indices (varidx_set); + (base+maxCoord).collect_variation_indices (varidx_set); + } + + bool subset (hb_subset_context_t *c, + const void *base) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->embed (*this); + if (unlikely (!out)) return_trace (false); + if (!(out->minCoord.serialize_subset (c, minCoord, base))) + return_trace (false); + + return_trace (out->maxCoord.serialize_subset (c, maxCoord, base)); + } + bool sanitize (hb_sanitize_context_t *c, const void *base) const { TRACE_SANITIZE (this); @@ -206,6 +286,39 @@ struct MinMax } } + void collect_variation_indices (const hb_subset_plan_t* plan, + hb_set_t& varidx_set /* OUT */) const + { + (this+minCoord).collect_variation_indices (varidx_set); + (this+maxCoord).collect_variation_indices (varidx_set); + for (const FeatMinMaxRecord& record : featMinMaxRecords) + record.collect_variation_indices (plan, this, varidx_set); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->start_embed (*this); + if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false); + + if (!(out->minCoord.serialize_subset (c, minCoord, this)) || + !(out->maxCoord.serialize_subset (c, maxCoord, this))) + return_trace (false); + + unsigned len = 0; + for (const FeatMinMaxRecord& _ : featMinMaxRecords) + { + hb_tag_t feature_tag = _.get_feature_tag (); + if (!c->plan->layout_features.has (feature_tag)) + continue; + + if (!_.subset (c, this)) return false; + len++; + } + return_trace (c->serializer->check_assign (out->featMinMaxRecords.len, len, + HB_SERIALIZE_ERROR_INT_OVERFLOW)); + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -240,6 +353,26 @@ struct BaseValues return this+baseCoords[baseline_tag_index]; } + void collect_variation_indices (hb_set_t& varidx_set /* OUT */) const + { + for (const auto& _ : baseCoords) + (this+_).collect_variation_indices (varidx_set); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->start_embed (*this); + if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false); + out->defaultIndex = defaultIndex; + + for (const auto& _ : baseCoords) + if (!subset_offset_array (c, out->baseCoords, this) (_)) + return_trace (false); + + return_trace (bool (out->baseCoords)); + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -270,6 +403,20 @@ struct BaseLangSysRecord const MinMax &get_min_max () const { 
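All the collect_variation_indices() overloads being added to the BASE hierarchy follow one shape: the leaf case (a format-3 BaseCoord) drops its device table's variation index into a shared hb_set_t, and every container just recurses over the offsets it owns so the subsetter can later remap the collected indices. Condensed sketch of the two roles (illustrative, mirroring the overloads above):

  /* leaf */
  void collect_variation_indices (hb_set_t &varidx_set) const
  { varidx_set.add ((this+deviceTable).get_variation_index ()); }

  /* container: recurse into each owned offset */
  (this+minCoord).collect_variation_indices (varidx_set);
  (this+maxCoord).collect_variation_indices (varidx_set);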
return this+minMax; } + void collect_variation_indices (const hb_subset_plan_t* plan, + hb_set_t& varidx_set /* OUT */) const + { (this+minMax).collect_variation_indices (plan, varidx_set); } + + bool subset (hb_subset_context_t *c, + const void *base) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->embed (*this); + if (unlikely (!out)) return_trace (false); + + return_trace (out->minMax.serialize_subset (c, minMax, base)); + } + bool sanitize (hb_sanitize_context_t *c, const void *base) const { TRACE_SANITIZE (this); @@ -300,6 +447,35 @@ struct BaseScript bool has_values () const { return baseValues; } bool has_min_max () const { return defaultMinMax; /* TODO What if only per-language is present? */ } + void collect_variation_indices (const hb_subset_plan_t* plan, + hb_set_t& varidx_set /* OUT */) const + { + (this+baseValues).collect_variation_indices (varidx_set); + (this+defaultMinMax).collect_variation_indices (plan, varidx_set); + + for (const BaseLangSysRecord& _ : baseLangSysRecords) + _.collect_variation_indices (plan, varidx_set); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->start_embed (*this); + if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false); + + if (baseValues && !out->baseValues.serialize_subset (c, baseValues, this)) + return_trace (false); + + if (defaultMinMax && !out->defaultMinMax.serialize_subset (c, defaultMinMax, this)) + return_trace (false); + + for (const auto& _ : baseLangSysRecords) + if (!_.subset (c, this)) return_trace (false); + + return_trace (c->serializer->check_assign (out->baseLangSysRecords.len, baseLangSysRecords.len, + HB_SERIALIZE_ERROR_INT_OVERFLOW)); + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -332,9 +508,31 @@ struct BaseScriptRecord bool has_data () const { return baseScriptTag; } + hb_tag_t get_script_tag () const { return baseScriptTag; } + const BaseScript &get_base_script (const BaseScriptList *list) const { return list+baseScript; } + void collect_variation_indices (const hb_subset_plan_t* plan, + const void* list, + hb_set_t& varidx_set /* OUT */) const + { + if (!plan->layout_scripts.has (baseScriptTag)) + return; + + (list+baseScript).collect_variation_indices (plan, varidx_set); + } + + bool subset (hb_subset_context_t *c, + const void *base) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->embed (*this); + if (unlikely (!out)) return_trace (false); + + return_trace (out->baseScript.serialize_subset (c, baseScript, base)); + } + bool sanitize (hb_sanitize_context_t *c, const void *base) const { TRACE_SANITIZE (this); @@ -361,6 +559,33 @@ struct BaseScriptList return record->has_data () ? 
record->get_base_script (this) : Null (BaseScript); } + void collect_variation_indices (const hb_subset_plan_t* plan, + hb_set_t& varidx_set /* OUT */) const + { + for (const BaseScriptRecord& _ : baseScriptRecords) + _.collect_variation_indices (plan, this, varidx_set); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->start_embed (*this); + if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false); + + unsigned len = 0; + for (const BaseScriptRecord& _ : baseScriptRecords) + { + hb_tag_t script_tag = _.get_script_tag (); + if (!c->plan->layout_scripts.has (script_tag)) + continue; + + if (!_.subset (c, this)) return false; + len++; + } + return_trace (c->serializer->check_assign (out->baseScriptRecords.len, len, + HB_SERIALIZE_ERROR_INT_OVERFLOW)); + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -422,6 +647,20 @@ struct Axis return true; } + void collect_variation_indices (const hb_subset_plan_t* plan, + hb_set_t& varidx_set /* OUT */) const + { (this+baseScriptList).collect_variation_indices (plan, varidx_set); } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->embed (*this); + if (unlikely (!out)) return_trace (false); + + out->baseTagList.serialize_copy (c->serializer, baseTagList, this); + return_trace (out->baseScriptList.serialize_subset (c, baseScriptList, this)); + } + bool sanitize (hb_sanitize_context_t *c) const { TRACE_SANITIZE (this); @@ -453,8 +692,41 @@ struct BASE const Axis &get_axis (hb_direction_t direction) const { return HB_DIRECTION_IS_VERTICAL (direction) ? this+vAxis : this+hAxis; } - const VariationStore &get_var_store () const - { return version.to_int () < 0x00010001u ? Null (VariationStore) : this+varStore; } + bool has_var_store () const + { return version.to_int () >= 0x00010001u && varStore != 0; } + + const ItemVariationStore &get_var_store () const + { return version.to_int () < 0x00010001u ? 
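The subset() methods added for MinMax and BaseScriptList above share one pattern: embed the fixed header, subset only those records whose tag is still present in the subset plan, and finally patch the array length with check_assign(). Trimmed sketch of that loop (names as in the hunks; error handling abbreviated):

  unsigned kept = 0;
  for (const BaseScriptRecord &rec : baseScriptRecords)
  {
    if (!c->plan->layout_scripts.has (rec.get_script_tag ()))
      continue;                       /* script dropped by the plan */
    if (!rec.subset (c, this)) return_trace (false);
    kept++;
  }
  return_trace (c->serializer->check_assign (out->baseScriptRecords.len, kept,
                                             HB_SERIALIZE_ERROR_INT_OVERFLOW));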
Null (ItemVariationStore) : this+varStore; } + + void collect_variation_indices (const hb_subset_plan_t* plan, + hb_set_t& varidx_set /* OUT */) const + { + (this+hAxis).collect_variation_indices (plan, varidx_set); + (this+vAxis).collect_variation_indices (plan, varidx_set); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->start_embed (*this); + if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false); + + out->version = version; + if (hAxis && !out->hAxis.serialize_subset (c, hAxis, this)) + return_trace (false); + + if (vAxis && !out->vAxis.serialize_subset (c, vAxis, this)) + return_trace (false); + + if (has_var_store ()) + { + if (!c->serializer->allocate_size<Offset32To<ItemVariationStore>> (Offset32To<ItemVariationStore>::static_size)) + return_trace (false); + return_trace (out->varStore.serialize_subset (c, varStore, this, c->plan->base_varstore_inner_maps.as_array ())); + } + + return_trace (true); + } bool get_baseline (hb_font_t *font, hb_tag_t baseline_tag, @@ -487,7 +759,7 @@ struct BASE &min_coord, &max_coord)) return false; - const VariationStore &var_store = get_var_store (); + const ItemVariationStore &var_store = get_var_store (); if (likely (min && min_coord)) *min = min_coord->get_coord (font, var_store, direction); if (likely (max && max_coord)) *max = max_coord->get_coord (font, var_store, direction); return true; @@ -510,7 +782,7 @@ struct BASE * of BASE table (may be NULL) */ Offset16To<Axis>vAxis; /* Offset to vertical Axis table, from beginning * of BASE table (may be NULL) */ - Offset32To<VariationStore> + Offset32To<ItemVariationStore> varStore; /* Offset to the table of Item Variation * Store--from beginning of BASE * header (may be NULL). Introduced diff --git a/gfx/harfbuzz/src/hb-ot-layout-common.hh b/gfx/harfbuzz/src/hb-ot-layout-common.hh index 6b359cceb7..aba427368c 100644 --- a/gfx/harfbuzz/src/hb-ot-layout-common.hh +++ b/gfx/harfbuzz/src/hb-ot-layout-common.hh @@ -188,7 +188,7 @@ struct hb_subset_layout_context_t : unsigned lookup_index_count; }; -struct VariationStore; +struct ItemVariationStore; struct hb_collect_variation_indices_context_t : hb_dispatch_context_t<hb_collect_variation_indices_context_t> { @@ -3036,7 +3036,7 @@ struct VarData DEFINE_SIZE_ARRAY (6, regionIndices); }; -struct VariationStore +struct ItemVariationStore { friend struct item_variations_t; using cache_t = VarRegionList::cache_t; @@ -3141,7 +3141,7 @@ struct VariationStore } bool serialize (hb_serialize_context_t *c, - const VariationStore *src, + const ItemVariationStore *src, const hb_array_t <const hb_inc_bimap_t> &inner_maps) { TRACE_SERIALIZE (this); @@ -3197,7 +3197,7 @@ struct VariationStore return_trace (true); } - VariationStore *copy (hb_serialize_context_t *c) const + ItemVariationStore *copy (hb_serialize_context_t *c) const { TRACE_SERIALIZE (this); auto *out = c->start_embed (this); @@ -3227,7 +3227,7 @@ struct VariationStore return_trace (false); #endif - VariationStore *varstore_prime = c->serializer->start_embed<VariationStore> (); + ItemVariationStore *varstore_prime = c->serializer->start_embed<ItemVariationStore> (); if (unlikely (!varstore_prime)) return_trace (false); varstore_prime->serialize (c->serializer, this, inner_maps); @@ -4030,13 +4030,13 @@ struct VariationDevice private: hb_position_t get_x_delta (hb_font_t *font, - const VariationStore &store, - VariationStore::cache_t *store_cache = nullptr) const + const ItemVariationStore &store, + ItemVariationStore::cache_t 
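Throughout these hunks VariationStore becomes ItemVariationStore, matching the OpenType spec's "Item Variation Store" naming; the call pattern itself is unchanged. For reference, the cache lifecycle the renamed callers keep using (varStore, varIdx and font stand for whatever the caller already holds):

  OT::ItemVariationStore::cache_t *cache = varStore.create_cache ();
  float delta = varStore.get_delta (varIdx, font->coords, font->num_coords, cache);
  OT::ItemVariationStore::destroy_cache (cache);  /* also fine when cache is nullptr */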
*store_cache = nullptr) const { return font->em_scalef_x (get_delta (font, store, store_cache)); } hb_position_t get_y_delta (hb_font_t *font, - const VariationStore &store, - VariationStore::cache_t *store_cache = nullptr) const + const ItemVariationStore &store, + ItemVariationStore::cache_t *store_cache = nullptr) const { return font->em_scalef_y (get_delta (font, store, store_cache)); } VariationDevice* copy (hb_serialize_context_t *c, @@ -4070,10 +4070,10 @@ struct VariationDevice private: float get_delta (hb_font_t *font, - const VariationStore &store, - VariationStore::cache_t *store_cache = nullptr) const + const ItemVariationStore &store, + ItemVariationStore::cache_t *store_cache = nullptr) const { - return store.get_delta (varIdx, font->coords, font->num_coords, (VariationStore::cache_t *) store_cache); + return store.get_delta (varIdx, font->coords, font->num_coords, (ItemVariationStore::cache_t *) store_cache); } protected: @@ -4097,8 +4097,8 @@ struct DeviceHeader struct Device { hb_position_t get_x_delta (hb_font_t *font, - const VariationStore &store=Null (VariationStore), - VariationStore::cache_t *store_cache = nullptr) const + const ItemVariationStore &store=Null (ItemVariationStore), + ItemVariationStore::cache_t *store_cache = nullptr) const { switch (u.b.format) { @@ -4115,8 +4115,8 @@ struct Device } } hb_position_t get_y_delta (hb_font_t *font, - const VariationStore &store=Null (VariationStore), - VariationStore::cache_t *store_cache = nullptr) const + const ItemVariationStore &store=Null (ItemVariationStore), + ItemVariationStore::cache_t *store_cache = nullptr) const { switch (u.b.format) { diff --git a/gfx/harfbuzz/src/hb-ot-layout-gsubgpos.hh b/gfx/harfbuzz/src/hb-ot-layout-gsubgpos.hh index 499ad673e4..162179d08a 100644 --- a/gfx/harfbuzz/src/hb-ot-layout-gsubgpos.hh +++ b/gfx/harfbuzz/src/hb-ot-layout-gsubgpos.hh @@ -708,8 +708,8 @@ struct hb_ot_apply_context_t : recurse_func_t recurse_func = nullptr; const GDEF &gdef; const GDEF::accelerator_t &gdef_accel; - const VariationStore &var_store; - VariationStore::cache_t *var_store_cache; + const ItemVariationStore &var_store; + ItemVariationStore::cache_t *var_store_cache; hb_set_digest_t digest; hb_direction_t direction; @@ -766,7 +766,7 @@ struct hb_ot_apply_context_t : ~hb_ot_apply_context_t () { #ifndef HB_NO_VAR - VariationStore::destroy_cache (var_store_cache); + ItemVariationStore::destroy_cache (var_store_cache); #endif } diff --git a/gfx/harfbuzz/src/hb-ot-layout.cc b/gfx/harfbuzz/src/hb-ot-layout.cc index 2eb8535db5..a4c13abadf 100644 --- a/gfx/harfbuzz/src/hb-ot-layout.cc +++ b/gfx/harfbuzz/src/hb-ot-layout.cc @@ -2127,7 +2127,7 @@ hb_ot_layout_get_font_extents (hb_font_t *font, hb_tag_t language_tag, hb_font_extents_t *extents) { - hb_position_t min, max; + hb_position_t min = 0, max = 0; if (font->face->table.BASE->get_min_max (font, direction, script_tag, language_tag, HB_TAG_NONE, &min, &max)) { diff --git a/gfx/harfbuzz/src/hb-ot-math-table.hh b/gfx/harfbuzz/src/hb-ot-math-table.hh index 32e497aef6..5839059fde 100644 --- a/gfx/harfbuzz/src/hb-ot-math-table.hh +++ b/gfx/harfbuzz/src/hb-ot-math-table.hh @@ -344,27 +344,20 @@ struct MathKern const MathValueRecord* kernValue = mathValueRecordsZ.arrayZ + heightCount; int sign = font->y_scale < 0 ? 
-1 : +1; - /* The description of the MathKern table is a ambiguous, but interpreting - * "between the two heights found at those indexes" for 0 < i < len as - * - * correctionHeight[i-1] < correction_height <= correctionHeight[i] - * - * makes the result consistent with the limit cases and we can just use the - * binary search algorithm of std::upper_bound: + /* According to OpenType spec (v1.9), except for the boundary cases, the index + * chosen for kern value should be i such that + * correctionHeight[i-1] <= correction_height < correctionHeight[i] + * We can use the binary search algorithm of std::upper_bound(). Or, we can + * use the internal hb_bsearch_impl. */ - unsigned int i = 0; - unsigned int count = heightCount; - while (count > 0) - { - unsigned int half = count / 2; - hb_position_t height = correctionHeight[i + half].get_y_value (font, this); - if (sign * height < sign * correction_height) - { - i += half + 1; - count -= half + 1; - } else - count = half; - } + unsigned int pos; + auto cmp = +[](const void* key, const void* p, + int sign, hb_font_t* font, const MathKern* mathKern) -> int { + return sign * *(hb_position_t*)key - sign * ((MathValueRecord*)p)->get_y_value(font, mathKern); + }; + unsigned int i = hb_bsearch_impl(&pos, correction_height, correctionHeight, + heightCount, MathValueRecord::static_size, + cmp, sign, font, this) ? pos + 1 : pos; return kernValue[i].get_x_value (font, this); } diff --git a/gfx/harfbuzz/src/hb-ot-shaper-arabic.cc b/gfx/harfbuzz/src/hb-ot-shaper-arabic.cc index 72dcc84df5..d70746ed2b 100644 --- a/gfx/harfbuzz/src/hb-ot-shaper-arabic.cc +++ b/gfx/harfbuzz/src/hb-ot-shaper-arabic.cc @@ -560,9 +560,9 @@ apply_stch (const hb_ot_shape_plan_t *plan HB_UNUSED, DEBUG_MSG (ARABIC, nullptr, "%s stretch at (%u,%u,%u)", step == MEASURE ? "measuring" : "cutting", context, start, end); - DEBUG_MSG (ARABIC, nullptr, "rest of word: count=%u width %d", start - context, w_total); - DEBUG_MSG (ARABIC, nullptr, "fixed tiles: count=%d width=%d", n_fixed, w_fixed); - DEBUG_MSG (ARABIC, nullptr, "repeating tiles: count=%d width=%d", n_repeating, w_repeating); + DEBUG_MSG (ARABIC, nullptr, "rest of word: count=%u width %" PRId32, start - context, w_total); + DEBUG_MSG (ARABIC, nullptr, "fixed tiles: count=%d width=%" PRId32, n_fixed, w_fixed); + DEBUG_MSG (ARABIC, nullptr, "repeating tiles: count=%d width=%" PRId32, n_repeating, w_repeating); /* Number of additional times to repeat each repeating tile. 
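The rewritten MathKern lookup above delegates to hb_bsearch_impl(), but the boundary rule it implements is still the upper-bound one the spec describes. A worked example of the index arithmetic (comments only; heights assumed ascending):

  /* correctionHeight: [ h0, h1, h2 ]        heightCount == 3
   * kernValue:        [ k0, k1, k2, k3 ]    heightCount + 1 entries
   *
   *   height <  h0        -> k0
   *   h0 <= height < h1   -> k1   (spec: correctionHeight[i-1] <= height < correctionHeight[i])
   *   h2 <= height        -> k3
   *
   * On an exact hit hb_bsearch_impl () reports the matching position and the
   * "pos + 1" above steps past it, which is what puts the <= on the left edge
   * of each interval; sign flips the comparison when y_scale is negative. */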
*/ int n_copies = 0; @@ -602,7 +602,7 @@ apply_stch (const hb_ot_shape_plan_t *plan HB_UNUSED, if (info[k - 1].arabic_shaping_action() == STCH_REPEATING) repeat += n_copies; - DEBUG_MSG (ARABIC, nullptr, "appending %u copies of glyph %u; j=%u", + DEBUG_MSG (ARABIC, nullptr, "appending %u copies of glyph %" PRIu32 "; j=%u", repeat, info[k - 1].codepoint, j); pos[k - 1].x_advance = 0; for (unsigned int n = 0; n < repeat; n++) diff --git a/gfx/harfbuzz/src/hb-ot-stat-table.hh b/gfx/harfbuzz/src/hb-ot-stat-table.hh index 58b3cd74df..e88c82a13c 100644 --- a/gfx/harfbuzz/src/hb-ot-stat-table.hh +++ b/gfx/harfbuzz/src/hb-ot-stat-table.hh @@ -349,7 +349,7 @@ struct AxisValueFormat4 struct AxisValue { - bool get_value (unsigned int axis_index) const + float get_value (unsigned int axis_index) const { switch (u.format) { @@ -357,7 +357,7 @@ struct AxisValue case 2: return u.format2.get_value (); case 3: return u.format3.get_value (); case 4: return u.format4.get_axis_record (axis_index).get_value (); - default:return 0; + default:return 0.f; } } @@ -485,7 +485,7 @@ struct STAT hb_array_t<const Offset16To<AxisValue>> axis_values = get_axis_value_offsets (); for (unsigned int i = 0; i < axis_values.length; i++) { - const AxisValue& axis_value = this+axis_values[i]; + const AxisValue& axis_value = this+offsetToAxisValueOffsets+axis_values[i]; if (axis_value.get_axis_index () == axis_index) { if (value) diff --git a/gfx/harfbuzz/src/hb-ot-tag-table.hh b/gfx/harfbuzz/src/hb-ot-tag-table.hh index 032a7c866c..db92f4664a 100644 --- a/gfx/harfbuzz/src/hb-ot-tag-table.hh +++ b/gfx/harfbuzz/src/hb-ot-tag-table.hh @@ -6,8 +6,8 @@ * * on files with these headers: * - * <meta name="updated_at" content="2022-09-30 11:47 PM" /> - * File-Date: 2023-08-02 + * <meta name="updated_at" content="2023-09-30 01:21 AM" /> + * File-Date: 2024-03-07 */ #ifndef HB_OT_TAG_TABLE_HH @@ -31,7 +31,7 @@ static const LangTag ot_languages2[] = { {HB_TAG('b','i',' ',' '), HB_TAG('B','I','S',' ')}, /* Bislama */ {HB_TAG('b','i',' ',' '), HB_TAG('C','P','P',' ')}, /* Bislama -> Creoles */ {HB_TAG('b','m',' ',' '), HB_TAG('B','M','B',' ')}, /* Bambara (Bamanankan) */ - {HB_TAG('b','n',' ',' '), HB_TAG('B','E','N',' ')}, /* Bengali */ + {HB_TAG('b','n',' ',' '), HB_TAG('B','E','N',' ')}, /* Bangla */ {HB_TAG('b','o',' ',' '), HB_TAG('T','I','B',' ')}, /* Tibetan */ {HB_TAG('b','r',' ',' '), HB_TAG('B','R','E',' ')}, /* Breton */ {HB_TAG('b','s',' ',' '), HB_TAG('B','O','S',' ')}, /* Bosnian */ @@ -64,7 +64,7 @@ static const LangTag ot_languages2[] = { {HB_TAG('f','r',' ',' '), HB_TAG('F','R','A',' ')}, /* French */ {HB_TAG('f','y',' ',' '), HB_TAG('F','R','I',' ')}, /* Western Frisian -> Frisian */ {HB_TAG('g','a',' ',' '), HB_TAG('I','R','I',' ')}, /* Irish */ - {HB_TAG('g','d',' ',' '), HB_TAG('G','A','E',' ')}, /* Scottish Gaelic (Gaelic) */ + {HB_TAG('g','d',' ',' '), HB_TAG('G','A','E',' ')}, /* Scottish Gaelic */ {HB_TAG('g','l',' ',' '), HB_TAG('G','A','L',' ')}, /* Galician */ {HB_TAG('g','n',' ',' '), HB_TAG('G','U','A',' ')}, /* Guarani [macrolanguage] */ {HB_TAG('g','u',' ',' '), HB_TAG('G','U','J',' ')}, /* Gujarati */ @@ -132,7 +132,7 @@ static const LangTag ot_languages2[] = { {HB_TAG('m','l',' ',' '), HB_TAG('M','A','L',' ')}, /* Malayalam -> Malayalam Traditional */ {HB_TAG('m','l',' ',' '), HB_TAG('M','L','R',' ')}, /* Malayalam -> Malayalam Reformed */ {HB_TAG('m','n',' ',' '), HB_TAG('M','N','G',' ')}, /* Mongolian [macrolanguage] */ - {HB_TAG('m','o',' ',' '), HB_TAG('M','O','L',' ')}, /* Moldavian (retired code) */ + 
{HB_TAG('m','o',' ',' '), HB_TAG('M','O','L',' ')}, /* Moldavian (retired code) -> Romanian (Moldova) */ {HB_TAG('m','o',' ',' '), HB_TAG('R','O','M',' ')}, /* Moldavian (retired code) -> Romanian */ {HB_TAG('m','r',' ',' '), HB_TAG('M','A','R',' ')}, /* Marathi */ {HB_TAG('m','s',' ',' '), HB_TAG('M','L','Y',' ')}, /* Malay [macrolanguage] */ @@ -153,7 +153,7 @@ static const LangTag ot_languages2[] = { {HB_TAG('o','c',' ',' '), HB_TAG('O','C','I',' ')}, /* Occitan (post 1500) */ {HB_TAG('o','j',' ',' '), HB_TAG('O','J','B',' ')}, /* Ojibwa [macrolanguage] -> Ojibway */ {HB_TAG('o','m',' ',' '), HB_TAG('O','R','O',' ')}, /* Oromo [macrolanguage] */ - {HB_TAG('o','r',' ',' '), HB_TAG('O','R','I',' ')}, /* Odia (formerly Oriya) [macrolanguage] */ + {HB_TAG('o','r',' ',' '), HB_TAG('O','R','I',' ')}, /* Odia [macrolanguage] */ {HB_TAG('o','s',' ',' '), HB_TAG('O','S','S',' ')}, /* Ossetian */ {HB_TAG('p','a',' ',' '), HB_TAG('P','A','N',' ')}, /* Punjabi */ {HB_TAG('p','i',' ',' '), HB_TAG('P','A','L',' ')}, /* Pali */ @@ -166,7 +166,7 @@ static const LangTag ot_languages2[] = { {HB_TAG('r','o',' ',' '), HB_TAG('R','O','M',' ')}, /* Romanian */ {HB_TAG('r','u',' ',' '), HB_TAG('R','U','S',' ')}, /* Russian */ {HB_TAG('r','w',' ',' '), HB_TAG('R','U','A',' ')}, /* Kinyarwanda */ - {HB_TAG('s','a',' ',' '), HB_TAG('S','A','N',' ')}, /* Sanskrit */ + {HB_TAG('s','a',' ',' '), HB_TAG('S','A','N',' ')}, /* Sanskrit [macrolanguage] */ {HB_TAG('s','c',' ',' '), HB_TAG('S','R','D',' ')}, /* Sardinian [macrolanguage] */ {HB_TAG('s','d',' ',' '), HB_TAG('S','N','D',' ')}, /* Sindhi */ {HB_TAG('s','e',' ',' '), HB_TAG('N','S','M',' ')}, /* Northern Sami */ @@ -465,6 +465,7 @@ static const LangTag ot_languages3[] = { {HB_TAG('c','l','d',' '), HB_TAG('S','Y','R',' ')}, /* Chaldean Neo-Aramaic -> Syriac */ {HB_TAG('c','l','e',' '), HB_TAG('C','C','H','N')}, /* Lealao Chinantec -> Chinantec */ {HB_TAG('c','l','j',' '), HB_TAG('Q','I','N',' ')}, /* Laitu Chin -> Chin */ + {HB_TAG('c','l','s',' '), HB_TAG('S','A','N',' ')}, /* Classical Sanskrit -> Sanskrit */ {HB_TAG('c','l','t',' '), HB_TAG('Q','I','N',' ')}, /* Lautu Chin -> Chin */ {HB_TAG('c','m','n',' '), HB_TAG('Z','H','S',' ')}, /* Mandarin Chinese -> Chinese, Simplified */ {HB_TAG('c','m','r',' '), HB_TAG('Q','I','N',' ')}, /* Mro-Khimi Chin -> Chin */ @@ -637,7 +638,7 @@ static const LangTag ot_languages3[] = { {HB_TAG('g','a','a',' '), HB_TAG('G','A','D',' ')}, /* Ga */ {HB_TAG('g','a','c',' '), HB_TAG('C','P','P',' ')}, /* Mixed Great Andamanese -> Creoles */ {HB_TAG('g','a','d',' '), HB_TAG_NONE }, /* Gaddang != Ga */ - {HB_TAG('g','a','e',' '), HB_TAG_NONE }, /* Guarequena != Scottish Gaelic (Gaelic) */ + {HB_TAG('g','a','e',' '), HB_TAG_NONE }, /* Guarequena != Scottish Gaelic */ /*{HB_TAG('g','a','g',' '), HB_TAG('G','A','G',' ')},*/ /* Gagauz */ {HB_TAG('g','a','l',' '), HB_TAG_NONE }, /* Galolen != Galician */ {HB_TAG('g','a','n',' '), HB_TAG('Z','H','S',' ')}, /* Gan Chinese -> Chinese, Simplified */ @@ -1160,7 +1161,7 @@ static const LangTag ot_languages3[] = { {HB_TAG('o','r','o',' '), HB_TAG_NONE }, /* Orokolo != Oromo */ {HB_TAG('o','r','r',' '), HB_TAG('I','J','O',' ')}, /* Oruma -> Ijo */ {HB_TAG('o','r','s',' '), HB_TAG('M','L','Y',' ')}, /* Orang Seletar -> Malay */ - {HB_TAG('o','r','y',' '), HB_TAG('O','R','I',' ')}, /* Odia (formerly Oriya) */ + {HB_TAG('o','r','y',' '), HB_TAG('O','R','I',' ')}, /* Odia */ {HB_TAG('o','t','w',' '), HB_TAG('O','J','B',' ')}, /* Ottawa -> Ojibway */ {HB_TAG('o','u','a',' '), HB_TAG('B','B','R',' 
')}, /* Tagargrent -> Berber */ {HB_TAG('p','a','a',' '), HB_TAG_NONE }, /* Papuan [collection] != Palestinian Aramaic */ @@ -1395,7 +1396,7 @@ static const LangTag ot_languages3[] = { /*{HB_TAG('s','n','k',' '), HB_TAG('S','N','K',' ')},*/ /* Soninke */ {HB_TAG('s','o','g',' '), HB_TAG_NONE }, /* Sogdian != Sodo Gurage */ /*{HB_TAG('s','o','p',' '), HB_TAG('S','O','P',' ')},*/ /* Songe */ - {HB_TAG('s','p','v',' '), HB_TAG('O','R','I',' ')}, /* Sambalpuri -> Odia (formerly Oriya) */ + {HB_TAG('s','p','v',' '), HB_TAG('O','R','I',' ')}, /* Sambalpuri -> Odia */ {HB_TAG('s','p','y',' '), HB_TAG('K','A','L',' ')}, /* Sabaot -> Kalenjin */ {HB_TAG('s','r','b',' '), HB_TAG_NONE }, /* Sora != Serbian */ {HB_TAG('s','r','c',' '), HB_TAG('S','R','D',' ')}, /* Logudorese Sardinian -> Sardinian */ @@ -1533,6 +1534,7 @@ static const LangTag ot_languages3[] = { {HB_TAG('v','l','s',' '), HB_TAG('F','L','E',' ')}, /* Vlaams -> Dutch (Flemish) */ {HB_TAG('v','m','w',' '), HB_TAG('M','A','K',' ')}, /* Makhuwa */ /*{HB_TAG('v','r','o',' '), HB_TAG('V','R','O',' ')},*/ /* Võro */ + {HB_TAG('v','s','n',' '), HB_TAG('S','A','N',' ')}, /* Vedic Sanskrit -> Sanskrit */ {HB_TAG('w','a','g',' '), HB_TAG_NONE }, /* Wa'ema != Wagdi */ /*{HB_TAG('w','a','r',' '), HB_TAG('W','A','R',' ')},*/ /* Waray (Philippines) -> Waray-Waray */ {HB_TAG('w','b','m',' '), HB_TAG('W','A',' ',' ')}, /* Wa */ @@ -2643,7 +2645,7 @@ out: /* Romanian; Moldova */ unsigned int i; hb_tag_t possible_tags[] = { - HB_TAG('M','O','L',' '), /* Moldavian */ + HB_TAG('M','O','L',' '), /* Romanian (Moldova) */ HB_TAG('R','O','M',' '), /* Romanian */ }; for (i = 0; i < 2 && i < *count; i++) @@ -2920,7 +2922,7 @@ hb_ot_ambiguous_tag_to_language (hb_tag_t tag) return hb_language_from_string ("mn", -1); /* Mongolian [macrolanguage] */ case HB_TAG('M','N','K',' '): /* Maninka */ return hb_language_from_string ("man", -1); /* Mandingo [macrolanguage] */ - case HB_TAG('M','O','L',' '): /* Moldavian */ + case HB_TAG('M','O','L',' '): /* Romanian (Moldova) */ return hb_language_from_string ("ro-MD", -1); /* Romanian; Moldova */ case HB_TAG('M','O','N','T'): /* Thailand Mon */ return hb_language_from_string ("mnw-TH", -1); /* Mon; Thailand */ @@ -2958,6 +2960,8 @@ hb_ot_ambiguous_tag_to_language (hb_tag_t tag) return hb_language_from_string ("ro", -1); /* Romanian */ case HB_TAG('R','O','Y',' '): /* Romany */ return hb_language_from_string ("rom", -1); /* Romany [macrolanguage] */ + case HB_TAG('S','A','N',' '): /* Sanskrit */ + return hb_language_from_string ("sa", -1); /* Sanskrit [macrolanguage] */ case HB_TAG('S','Q','I',' '): /* Albanian */ return hb_language_from_string ("sq", -1); /* Albanian [macrolanguage] */ case HB_TAG('S','R','B',' '): /* Serbian */ diff --git a/gfx/harfbuzz/src/hb-ot-tag.cc b/gfx/harfbuzz/src/hb-ot-tag.cc index 53b6b38f66..0c63756b14 100644 --- a/gfx/harfbuzz/src/hb-ot-tag.cc +++ b/gfx/harfbuzz/src/hb-ot-tag.cc @@ -547,7 +547,7 @@ hb_ot_tag_to_language (hb_tag_t tag) buf[3] = '-'; str += 4; } - snprintf (str, 16, "x-hbot-%08x", tag); + snprintf (str, 16, "x-hbot-%08" PRIx32, tag); return hb_language_from_string (&*buf, -1); } } diff --git a/gfx/harfbuzz/src/hb-ot-var-avar-table.hh b/gfx/harfbuzz/src/hb-ot-var-avar-table.hh index b2e5d87a3c..9149959d79 100644 --- a/gfx/harfbuzz/src/hb-ot-var-avar-table.hh +++ b/gfx/harfbuzz/src/hb-ot-var-avar-table.hh @@ -57,7 +57,7 @@ struct avarV2Tail protected: Offset32To<DeltaSetIndexMap> varIdxMap; /* Offset from the beginning of 'avar' table. 
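The snprintf change above (like the PRId32/PRIu32 fixes in the Arabic shaper hunk) switches to the <inttypes.h> width macros so the format string always matches the fixed-width argument, regardless of the platform's int size. Standalone illustration, not harfbuzz code:

  #include <cinttypes>
  #include <cstdio>

  void print_private_tag (uint32_t tag)
  {
    char buf[32];
    std::snprintf (buf, sizeof buf, "x-hbot-%08" PRIx32, tag);  /* 8 hex digits, zero padded */
    std::puts (buf);
  }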
*/ - Offset32To<VariationStore> varStore; /* Offset from the beginning of 'avar' table. */ + Offset32To<ItemVariationStore> varStore; /* Offset from the beginning of 'avar' table. */ public: DEFINE_SIZE_STATIC (8); @@ -230,7 +230,7 @@ struct SegmentMaps : Array16Of<AxisValueMap> * duplicates here */ if (mapping.must_include ()) continue; - value_mappings.push (std::move (mapping)); + value_mappings.push (mapping); } AxisValueMap m; @@ -343,7 +343,7 @@ struct avar for (unsigned i = 0; i < coords_length; i++) coords[i] = out[i]; - OT::VariationStore::destroy_cache (var_store_cache); + OT::ItemVariationStore::destroy_cache (var_store_cache); #endif } diff --git a/gfx/harfbuzz/src/hb-ot-var-common.hh b/gfx/harfbuzz/src/hb-ot-var-common.hh index eff6df380f..379e164059 100644 --- a/gfx/harfbuzz/src/hb-ot-var-common.hh +++ b/gfx/harfbuzz/src/hb-ot-var-common.hh @@ -28,6 +28,7 @@ #include "hb-ot-layout-common.hh" #include "hb-priority-queue.hh" +#include "hb-subset-instancer-iup.hh" namespace OT { @@ -221,9 +222,9 @@ struct DeltaSetIndexMap }; -struct VarStoreInstancer +struct ItemVarStoreInstancer { - VarStoreInstancer (const VariationStore *varStore, + ItemVarStoreInstancer (const ItemVariationStore *varStore, const DeltaSetIndexMap *varIdxMap, hb_array_t<int> coords) : varStore (varStore), varIdxMap (varIdxMap), coords (coords) {} @@ -235,7 +236,7 @@ struct VarStoreInstancer float operator() (uint32_t varIdx, unsigned short offset = 0) const { return coords ? varStore->get_delta (varIdxMap ? varIdxMap->map (VarIdx::add (varIdx, offset)) : varIdx + offset, coords) : 0; } - const VariationStore *varStore; + const ItemVariationStore *varStore; const DeltaSetIndexMap *varIdxMap; hb_array_t<int> coords; }; @@ -460,7 +461,7 @@ struct tuple_delta_t tuple_delta_t () = default; tuple_delta_t (const tuple_delta_t& o) = default; - friend void swap (tuple_delta_t& a, tuple_delta_t& b) + friend void swap (tuple_delta_t& a, tuple_delta_t& b) noexcept { hb_swap (a.axis_tuples, b.axis_tuples); hb_swap (a.indices, b.indices); @@ -471,10 +472,10 @@ struct tuple_delta_t hb_swap (a.compiled_peak_coords, b.compiled_peak_coords); } - tuple_delta_t (tuple_delta_t&& o) : tuple_delta_t () + tuple_delta_t (tuple_delta_t&& o) noexcept : tuple_delta_t () { hb_swap (*this, o); } - tuple_delta_t& operator = (tuple_delta_t&& o) + tuple_delta_t& operator = (tuple_delta_t&& o) noexcept { hb_swap (*this, o); return *this; @@ -609,7 +610,9 @@ struct tuple_delta_t const hb_map_t& axes_old_index_tag_map, const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map) { - if (!compiled_deltas) return false; + /* compiled_deltas could be empty after iup delta optimization, we can skip + * compiling this tuple and return true */ + if (!compiled_deltas) return true; unsigned cur_axis_count = axes_index_map.get_population (); /* allocate enough memory: 1 peak + 2 intermediate coords + fixed header size */ @@ -723,22 +726,28 @@ struct tuple_delta_t } bool compile_deltas () + { return compile_deltas (indices, deltas_x, deltas_y, compiled_deltas); } + + bool compile_deltas (const hb_vector_t<bool> &point_indices, + const hb_vector_t<float> &x_deltas, + const hb_vector_t<float> &y_deltas, + hb_vector_t<char> &compiled_deltas /* OUT */) { hb_vector_t<int> rounded_deltas; - if (unlikely (!rounded_deltas.alloc (indices.length))) + if (unlikely (!rounded_deltas.alloc (point_indices.length))) return false; - for (unsigned i = 0; i < indices.length; i++) + for (unsigned i = 0; i < point_indices.length; i++) { - if (!indices[i]) 
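ItemVarStoreInstancer (renamed from VarStoreInstancer above) is the small functor the instancing code uses to turn a variation index into a concrete delta at the pinned coordinates. Usage sketch; varStore, varIdxMap, coords and varIdx stand for whatever the caller already holds:

  ItemVarStoreInstancer instancer (varStore, varIdxMap, coords);
  float d0 = instancer (varIdx);        /* returns 0 when coords is empty */
  float d1 = instancer (varIdx, 1);     /* optional short offset added to varIdx */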
continue; - int rounded_delta = (int) roundf (deltas_x[i]); + if (!point_indices[i]) continue; + int rounded_delta = (int) roundf (x_deltas.arrayZ[i]); rounded_deltas.push (rounded_delta); } - if (!rounded_deltas) return false; + if (!rounded_deltas) return true; /* allocate enough memories 3 * num_deltas */ unsigned alloc_len = 3 * rounded_deltas.length; - if (deltas_y) + if (y_deltas) alloc_len *= 2; if (unlikely (!compiled_deltas.resize (alloc_len))) return false; @@ -746,14 +755,14 @@ struct tuple_delta_t unsigned i = 0; unsigned encoded_len = encode_delta_run (i, compiled_deltas.as_array (), rounded_deltas); - if (deltas_y) + if (y_deltas) { - /* reuse the rounded_deltas vector, check that deltas_y have the same num of deltas as deltas_x */ + /* reuse the rounded_deltas vector, check that y_deltas have the same num of deltas as x_deltas */ unsigned j = 0; - for (unsigned idx = 0; idx < indices.length; idx++) + for (unsigned idx = 0; idx < point_indices.length; idx++) { - if (!indices[idx]) continue; - int rounded_delta = (int) roundf (deltas_y[idx]); + if (!point_indices[idx]) continue; + int rounded_delta = (int) roundf (y_deltas.arrayZ[idx]); if (j >= rounded_deltas.length) return false; @@ -761,7 +770,7 @@ struct tuple_delta_t } if (j != rounded_deltas.length) return false; - /* reset i because we reuse rounded_deltas for deltas_y */ + /* reset i because we reuse rounded_deltas for y_deltas */ i = 0; encoded_len += encode_delta_run (i, compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas); } @@ -1020,6 +1029,171 @@ struct tuple_delta_t return true; } + bool optimize (const contour_point_vector_t& contour_points, + bool is_composite, + float tolerance = 0.5f) + { + unsigned count = contour_points.length; + if (deltas_x.length != count || + deltas_y.length != count) + return false; + + hb_vector_t<bool> opt_indices; + hb_vector_t<int> rounded_x_deltas, rounded_y_deltas; + + if (unlikely (!rounded_x_deltas.alloc (count) || + !rounded_y_deltas.alloc (count))) + return false; + + for (unsigned i = 0; i < count; i++) + { + int rounded_x_delta = (int) roundf (deltas_x.arrayZ[i]); + int rounded_y_delta = (int) roundf (deltas_y.arrayZ[i]); + rounded_x_deltas.push (rounded_x_delta); + rounded_y_deltas.push (rounded_y_delta); + } + + if (!iup_delta_optimize (contour_points, rounded_x_deltas, rounded_y_deltas, opt_indices, tolerance)) + return false; + + unsigned ref_count = 0; + for (bool ref_flag : opt_indices) + ref_count += ref_flag; + + if (ref_count == count) return true; + + hb_vector_t<float> opt_deltas_x, opt_deltas_y; + bool is_comp_glyph_wo_deltas = (is_composite && ref_count == 0); + if (is_comp_glyph_wo_deltas) + { + if (unlikely (!opt_deltas_x.resize (count) || + !opt_deltas_y.resize (count))) + return false; + + opt_indices.arrayZ[0] = true; + for (unsigned i = 1; i < count; i++) + opt_indices.arrayZ[i] = false; + } + + hb_vector_t<char> opt_point_data; + if (!compile_point_set (opt_indices, opt_point_data)) + return false; + hb_vector_t<char> opt_deltas_data; + if (!compile_deltas (opt_indices, + is_comp_glyph_wo_deltas ? opt_deltas_x : deltas_x, + is_comp_glyph_wo_deltas ? 
opt_deltas_y : deltas_y, + opt_deltas_data)) + return false; + + hb_vector_t<char> point_data; + if (!compile_point_set (indices, point_data)) + return false; + hb_vector_t<char> deltas_data; + if (!compile_deltas (indices, deltas_x, deltas_y, deltas_data)) + return false; + + if (opt_point_data.length + opt_deltas_data.length < point_data.length + deltas_data.length) + { + indices.fini (); + indices = std::move (opt_indices); + + if (is_comp_glyph_wo_deltas) + { + deltas_x.fini (); + deltas_x = std::move (opt_deltas_x); + + deltas_y.fini (); + deltas_y = std::move (opt_deltas_y); + } + } + return !indices.in_error () && !deltas_x.in_error () && !deltas_y.in_error (); + } + + static bool compile_point_set (const hb_vector_t<bool> &point_indices, + hb_vector_t<char>& compiled_points /* OUT */) + { + unsigned num_points = 0; + for (bool i : point_indices) + if (i) num_points++; + + /* when iup optimization is enabled, num of referenced points could be 0 */ + if (!num_points) return true; + + unsigned indices_length = point_indices.length; + /* If the points set consists of all points in the glyph, it's encoded with a + * single zero byte */ + if (num_points == indices_length) + return compiled_points.resize (1); + + /* allocate enough memories: 2 bytes for count + 3 bytes for each point */ + unsigned num_bytes = 2 + 3 *num_points; + if (unlikely (!compiled_points.resize (num_bytes, false))) + return false; + + unsigned pos = 0; + /* binary data starts with the total number of reference points */ + if (num_points < 0x80) + compiled_points.arrayZ[pos++] = num_points; + else + { + compiled_points.arrayZ[pos++] = ((num_points >> 8) | 0x80); + compiled_points.arrayZ[pos++] = num_points & 0xFF; + } + + const unsigned max_run_length = 0x7F; + unsigned i = 0; + unsigned last_value = 0; + unsigned num_encoded = 0; + while (i < indices_length && num_encoded < num_points) + { + unsigned run_length = 0; + unsigned header_pos = pos; + compiled_points.arrayZ[pos++] = 0; + + bool use_byte_encoding = false; + bool new_run = true; + while (i < indices_length && num_encoded < num_points && + run_length <= max_run_length) + { + // find out next referenced point index + while (i < indices_length && !point_indices[i]) + i++; + + if (i >= indices_length) break; + + unsigned cur_value = i; + unsigned delta = cur_value - last_value; + + if (new_run) + { + use_byte_encoding = (delta <= 0xFF); + new_run = false; + } + + if (use_byte_encoding && delta > 0xFF) + break; + + if (use_byte_encoding) + compiled_points.arrayZ[pos++] = delta; + else + { + compiled_points.arrayZ[pos++] = delta >> 8; + compiled_points.arrayZ[pos++] = delta & 0xFF; + } + i++; + last_value = cur_value; + run_length++; + num_encoded++; + } + + if (use_byte_encoding) + compiled_points.arrayZ[header_pos] = run_length - 1; + else + compiled_points.arrayZ[header_pos] = (run_length - 1) | 0x80; + } + return compiled_points.resize (pos, false); + } + static float infer_delta (float target_val, float prev_val, float next_val, float prev_delta, float next_delta) { if (prev_val == next_val) @@ -1071,41 +1245,41 @@ struct TupleVariationData private: /* referenced point set->compiled point data map */ - hb_hashmap_t<const hb_vector_t<bool>*, hb_bytes_t> point_data_map; + hb_hashmap_t<const hb_vector_t<bool>*, hb_vector_t<char>> point_data_map; /* referenced point set-> count map, used in finding shared points */ hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map; /* empty for non-gvar tuples. 
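The new static compile_point_set() above emits gvar's packed point numbers: a 1- or 2-byte count, then runs of point-index deltas, each run headed by a byte whose low 7 bits hold (run length - 1) and whose top bit selects byte- versus 16-bit-sized deltas. A worked example under those rules (hand-encoded for illustration, not produced by running the code):

  /* Referenced points {0, 1, 2, 3, 300}:
   *   0x05                       count = 5
   *   0x03  0x00 0x01 0x01 0x01  run of 4 byte deltas  -> points 0, 1, 2, 3
   *   0x80  0x01 0x29            run of 1 word delta (0x0129 = 297) -> point 300
   * A set that references every point in the glyph collapses to the single
   * byte 0x00, and optimize () keeps this encoding only when it plus the
   * re-compiled deltas end up smaller than the unoptimized pair. */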
- * shared_points_bytes is just a copy of some value in the point_data_map, + * shared_points_bytes is a pointer to some value in the point_data_map, * which will be freed during map destruction. Save it for serialization, so * no need to do find_shared_points () again */ - hb_bytes_t shared_points_bytes; + hb_vector_t<char> *shared_points_bytes = nullptr; /* total compiled byte size as TupleVariationData format, initialized to its * min_size: 4 */ unsigned compiled_byte_size = 4; + /* for gvar iup delta optimization: whether this is a composite glyph */ + bool is_composite = false; + public: tuple_variations_t () = default; tuple_variations_t (const tuple_variations_t&) = delete; tuple_variations_t& operator=(const tuple_variations_t&) = delete; tuple_variations_t (tuple_variations_t&&) = default; tuple_variations_t& operator=(tuple_variations_t&&) = default; - ~tuple_variations_t () { fini (); } - void fini () - { - for (auto _ : point_data_map.values ()) - _.fini (); - - point_set_count_map.fini (); - tuple_vars.fini (); - } + ~tuple_variations_t () = default; explicit operator bool () const { return bool (tuple_vars); } unsigned get_var_count () const { - unsigned count = tuple_vars.length; - if (shared_points_bytes.length) + unsigned count = 0; + /* when iup delta opt is enabled, compiled_deltas could be empty and we + * should skip this tuple */ + for (auto& tuple: tuple_vars) + if (tuple.compiled_deltas) count++; + + if (shared_points_bytes && shared_points_bytes->length) count |= TupleVarCount::SharedPointNumbers; return count; } @@ -1119,26 +1293,27 @@ struct TupleVariationData bool is_gvar, const hb_map_t *axes_old_index_tag_map, const hb_vector_t<unsigned> &shared_indices, - const hb_array_t<const F2DOT14> shared_tuples) + const hb_array_t<const F2DOT14> shared_tuples, + bool is_composite_glyph) { do { const HBUINT8 *p = iterator.get_serialized_data (); unsigned int length = iterator.current_tuple->get_data_size (); if (unlikely (!iterator.var_data_bytes.check_range (p, length))) - { fini (); return false; } + return false; hb_hashmap_t<hb_tag_t, Triple> axis_tuples; if (!iterator.current_tuple->unpack_axis_tuples (iterator.get_axis_count (), shared_tuples, axes_old_index_tag_map, axis_tuples) || axis_tuples.is_empty ()) - { fini (); return false; } + return false; hb_vector_t<unsigned> private_indices; bool has_private_points = iterator.current_tuple->has_private_points (); const HBUINT8 *end = p + length; if (has_private_points && !TupleVariationData::unpack_points (p, private_indices, end)) - { fini (); return false; } + return false; const hb_vector_t<unsigned> &indices = has_private_points ? 
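get_var_count above ORs TupleVarCount::SharedPointNumbers into the surviving tuple count when a shared point set is kept. For reference, a minimal sketch of how the serialized tupleVariationCount field is composed, assuming the spec's 0x8000 shared-point-numbers flag and 12-bit count mask:

```cpp
#include <cstdint>

enum : uint16_t {
  SHARED_POINT_NUMBERS = 0x8000u,  // bit 15: shared point numbers precede the tuples
  COUNT_MASK           = 0x0FFFu,  // low 12 bits: number of tuple variation headers
};

static uint16_t make_tuple_var_count (unsigned tuple_count, bool has_shared_points)
{
  uint16_t v = uint16_t (tuple_count & COUNT_MASK);
  if (has_shared_points) v |= SHARED_POINT_NUMBERS;
  return v;
}
```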
private_indices : shared_indices; bool apply_to_all = (indices.length == 0); @@ -1148,24 +1323,24 @@ struct TupleVariationData if (unlikely (!deltas_x.resize (num_deltas, false) || !TupleVariationData::unpack_deltas (p, deltas_x, end))) - { fini (); return false; } + return false; hb_vector_t<int> deltas_y; if (is_gvar) { if (unlikely (!deltas_y.resize (num_deltas, false) || !TupleVariationData::unpack_deltas (p, deltas_y, end))) - { fini (); return false; } + return false; } tuple_delta_t var; var.axis_tuples = std::move (axis_tuples); if (unlikely (!var.indices.resize (point_count) || !var.deltas_x.resize (point_count, false))) - { fini (); return false; } + return false; if (is_gvar && unlikely (!var.deltas_y.resize (point_count, false))) - { fini (); return false; } + return false; for (unsigned i = 0; i < num_deltas; i++) { @@ -1178,6 +1353,8 @@ struct TupleVariationData } tuple_vars.push (std::move (var)); } while (iterator.move_to_next ()); + + is_composite = is_composite_glyph; return true; } @@ -1261,7 +1438,7 @@ struct TupleVariationData unsigned new_len = new_vars.length + out.length; if (unlikely (!new_vars.alloc (new_len, false))) - { fini (); return false;} + return false; for (unsigned i = 0; i < out.length; i++) new_vars.push (std::move (out[i])); @@ -1272,8 +1449,9 @@ struct TupleVariationData return true; } - /* merge tuple variations with overlapping tents */ - void merge_tuple_variations () + /* merge tuple variations with overlapping tents, if iup delta optimization + * is enabled, add default deltas to contour_points */ + bool merge_tuple_variations (contour_point_vector_t* contour_points = nullptr) { hb_vector_t<tuple_delta_t> new_vars; hb_hashmap_t<const hb_hashmap_t<hb_tag_t, Triple>*, unsigned> m; @@ -1281,7 +1459,15 @@ struct TupleVariationData for (const tuple_delta_t& var : tuple_vars) { /* if all axes are pinned, drop the tuple variation */ - if (var.axis_tuples.is_empty ()) continue; + if (var.axis_tuples.is_empty ()) + { + /* if iup_delta_optimize is enabled, add deltas to contour coords */ + if (contour_points && !contour_points->add_deltas (var.deltas_x, + var.deltas_y, + var.indices)) + return false; + continue; + } unsigned *idx; if (m.has (&(var.axis_tuples), &idx)) @@ -1291,98 +1477,14 @@ struct TupleVariationData else { new_vars.push (var); - m.set (&(var.axis_tuples), i); + if (!m.set (&(var.axis_tuples), i)) + return false; i++; } } tuple_vars.fini (); tuple_vars = std::move (new_vars); - } - - hb_bytes_t compile_point_set (const hb_vector_t<bool> &point_indices) - { - unsigned num_points = 0; - for (bool i : point_indices) - if (i) num_points++; - - unsigned indices_length = point_indices.length; - /* If the points set consists of all points in the glyph, it's encoded with a - * single zero byte */ - if (num_points == indices_length) - { - char *p = (char *) hb_calloc (1, sizeof (char)); - if (unlikely (!p)) return hb_bytes_t (); - - return hb_bytes_t (p, 1); - } - - /* allocate enough memories: 2 bytes for count + 3 bytes for each point */ - unsigned num_bytes = 2 + 3 *num_points; - char *p = (char *) hb_calloc (num_bytes, sizeof (char)); - if (unlikely (!p)) return hb_bytes_t (); - - unsigned pos = 0; - /* binary data starts with the total number of reference points */ - if (num_points < 0x80) - p[pos++] = num_points; - else - { - p[pos++] = ((num_points >> 8) | 0x80); - p[pos++] = num_points & 0xFF; - } - - const unsigned max_run_length = 0x7F; - unsigned i = 0; - unsigned last_value = 0; - unsigned num_encoded = 0; - while (i < indices_length 
&& num_encoded < num_points) - { - unsigned run_length = 0; - unsigned header_pos = pos; - p[pos++] = 0; - - bool use_byte_encoding = false; - bool new_run = true; - while (i < indices_length && num_encoded < num_points && - run_length <= max_run_length) - { - // find out next referenced point index - while (i < indices_length && !point_indices[i]) - i++; - - if (i >= indices_length) break; - - unsigned cur_value = i; - unsigned delta = cur_value - last_value; - - if (new_run) - { - use_byte_encoding = (delta <= 0xFF); - new_run = false; - } - - if (use_byte_encoding && delta > 0xFF) - break; - - if (use_byte_encoding) - p[pos++] = delta; - else - { - p[pos++] = delta >> 8; - p[pos++] = delta & 0xFF; - } - i++; - last_value = cur_value; - run_length++; - num_encoded++; - } - - if (use_byte_encoding) - p[header_pos] = run_length - 1; - else - p[header_pos] = (run_length - 1) | 0x80; - } - return hb_bytes_t (p, pos); + return true; } /* compile all point set and store byte data in a point_set->hb_bytes_t hashmap, @@ -1402,11 +1504,11 @@ struct TupleVariationData continue; } - hb_bytes_t compiled_data = compile_point_set (*points_set); - if (unlikely (compiled_data == hb_bytes_t ())) + hb_vector_t<char> compiled_point_data; + if (!tuple_delta_t::compile_point_set (*points_set, compiled_point_data)) return false; - if (!point_data_map.set (points_set, compiled_data) || + if (!point_data_map.set (points_set, std::move (compiled_point_data)) || !point_set_count_map.set (points_set, 1)) return false; } @@ -1414,31 +1516,33 @@ struct TupleVariationData } /* find shared points set which saves most bytes */ - hb_bytes_t find_shared_points () + void find_shared_points () { unsigned max_saved_bytes = 0; - hb_bytes_t res{}; - for (const auto& _ : point_data_map.iter ()) + for (const auto& _ : point_data_map.iter_ref ()) { const hb_vector_t<bool>* points_set = _.first; unsigned data_length = _.second.length; + if (!data_length) continue; unsigned *count; if (unlikely (!point_set_count_map.has (points_set, &count) || *count <= 1)) - return hb_bytes_t (); + { + shared_points_bytes = nullptr; + return; + } unsigned saved_bytes = data_length * ((*count) -1); if (saved_bytes > max_saved_bytes) { max_saved_bytes = saved_bytes; - res = _.second; + shared_points_bytes = &(_.second); } } - return res; } - bool calc_inferred_deltas (contour_point_vector_t& contour_points) + bool calc_inferred_deltas (const contour_point_vector_t& contour_points) { for (tuple_delta_t& var : tuple_vars) if (!var.calc_inferred_deltas (contour_points)) @@ -1447,10 +1551,21 @@ struct TupleVariationData return true; } + bool iup_optimize (const contour_point_vector_t& contour_points) + { + for (tuple_delta_t& var : tuple_vars) + { + if (!var.optimize (contour_points, is_composite)) + return false; + } + return true; + } + public: bool instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location, const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances, - contour_point_vector_t* contour_points = nullptr) + contour_point_vector_t* contour_points = nullptr, + bool optimize = false) { if (!tuple_vars) return true; if (!change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances)) @@ -1460,7 +1575,14 @@ struct TupleVariationData if (!calc_inferred_deltas (*contour_points)) return false; - merge_tuple_variations (); + /* if iup delta opt is on, contour_points can't be null */ + if (optimize && !contour_points) + return false; + + if (!merge_tuple_variations (optimize ? 
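find_shared_points now keeps a pointer into point_data_map instead of copying bytes, but its selection rule is unchanged. A simplified restatement of that rule with illustrative std:: types (the real routine walks the point_data_map/point_set_count_map pair and gives up early on unshared sets):

```cpp
// Hoisting a point set that use_count tuples reference and that packs to
// data_length bytes saves data_length * (use_count - 1) bytes; the candidate
// with the biggest saving becomes the shared point-number set.
#include <vector>

struct point_set_stats { unsigned data_length; unsigned use_count; };

static int pick_shared_point_set (const std::vector<point_set_stats>& candidates)
{
  unsigned best_saved = 0;
  int best = -1;
  for (size_t i = 0; i < candidates.size (); i++)
  {
    if (!candidates[i].data_length || candidates[i].use_count <= 1) continue;
    unsigned saved = candidates[i].data_length * (candidates[i].use_count - 1);
    if (saved > best_saved) { best_saved = saved; best = int (i); }
  }
  return best; // index of the winner, or -1 if nothing is worth sharing
}
```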
contour_points : nullptr)) + return false; + + if (optimize && !iup_optimize (*contour_points)) return false; return !tuple_vars.in_error (); } @@ -1475,21 +1597,27 @@ struct TupleVariationData if (use_shared_points) { - shared_points_bytes = find_shared_points (); - compiled_byte_size += shared_points_bytes.length; + find_shared_points (); + if (shared_points_bytes) + compiled_byte_size += shared_points_bytes->length; } // compile delta and tuple var header for each tuple variation for (auto& tuple: tuple_vars) { const hb_vector_t<bool>* points_set = &(tuple.indices); - hb_bytes_t *points_data; + hb_vector_t<char> *points_data; if (unlikely (!point_data_map.has (points_set, &points_data))) return false; + /* when iup optimization is enabled, num of referenced points could be 0 + * and thus the compiled points bytes is empty, we should skip compiling + * this tuple */ + if (!points_data->length) + continue; if (!tuple.compile_deltas ()) return false; - unsigned points_data_length = (*points_data != shared_points_bytes) ? points_data->length : 0; + unsigned points_data_length = (points_data != shared_points_bytes) ? points_data->length : 0; if (!tuple.compile_tuple_var_header (axes_index_map, points_data_length, axes_old_index_tag_map, shared_tuples_idx_map)) return false; @@ -1513,18 +1641,24 @@ struct TupleVariationData bool serialize_var_data (hb_serialize_context_t *c, bool is_gvar) const { TRACE_SERIALIZE (this); - if (is_gvar) - shared_points_bytes.copy (c); + if (is_gvar && shared_points_bytes) + { + hb_bytes_t s (shared_points_bytes->arrayZ, shared_points_bytes->length); + s.copy (c); + } for (const auto& tuple: tuple_vars) { const hb_vector_t<bool>* points_set = &(tuple.indices); - hb_bytes_t *point_data; + hb_vector_t<char> *point_data; if (!point_data_map.has (points_set, &point_data)) return_trace (false); - if (!is_gvar || *point_data != shared_points_bytes) - point_data->copy (c); + if (!is_gvar || point_data != shared_points_bytes) + { + hb_bytes_t s (point_data->arrayZ, point_data->length); + s.copy (c); + } tuple.compiled_deltas.as_array ().copy (c); if (c->in_error ()) return_trace (false); @@ -1711,13 +1845,15 @@ struct TupleVariationData const hb_map_t *axes_old_index_tag_map, const hb_vector_t<unsigned> &shared_indices, const hb_array_t<const F2DOT14> shared_tuples, - tuple_variations_t& tuple_variations /* OUT */) const + tuple_variations_t& tuple_variations, /* OUT */ + bool is_composite_glyph = false) const { return tuple_variations.create_from_tuple_var_data (iterator, tupleVarCount, point_count, is_gvar, axes_old_index_tag_map, shared_indices, - shared_tuples); + shared_tuples, + is_composite_glyph); } bool serialize (hb_serialize_context_t *c, @@ -1831,7 +1967,7 @@ struct item_variations_t const hb_map_t& get_varidx_map () const { return varidx_map; } - bool instantiate (const VariationStore& varStore, + bool instantiate (const ItemVariationStore& varStore, const hb_subset_plan_t *plan, bool optimize=true, bool use_no_variation_idx=true, @@ -1845,7 +1981,7 @@ struct item_variations_t } /* keep below APIs public only for unit test: test-item-varstore */ - bool create_from_item_varstore (const VariationStore& varStore, + bool create_from_item_varstore (const ItemVariationStore& varStore, const hb_map_t& axes_old_index_tag_map, const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ()) { diff --git a/gfx/harfbuzz/src/hb-ot-var-gvar-table.hh b/gfx/harfbuzz/src/hb-ot-var-gvar-table.hh index 1c7a1f6c1e..59aad57e37 100644 --- 
a/gfx/harfbuzz/src/hb-ot-var-gvar-table.hh +++ b/gfx/harfbuzz/src/hb-ot-var-gvar-table.hh @@ -101,10 +101,15 @@ struct glyph_variations_t continue; } + bool is_composite_glyph = false; +#ifdef HB_EXPERIMENTAL_API + is_composite_glyph = plan->composite_new_gids.has (new_gid); +#endif if (!p->decompile_tuple_variations (all_contour_points->length, true /* is_gvar */, iterator, &(plan->axes_old_index_tag_map), shared_indices, shared_tuples, - tuple_vars /* OUT */)) + tuple_vars, /* OUT */ + is_composite_glyph)) return false; glyph_variations.push (std::move (tuple_vars)); } @@ -114,13 +119,17 @@ struct glyph_variations_t bool instantiate (const hb_subset_plan_t *plan) { unsigned count = plan->new_to_old_gid_list.length; + bool iup_optimize = false; +#ifdef HB_EXPERIMENTAL_API + iup_optimize = plan->flags & HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS; +#endif for (unsigned i = 0; i < count; i++) { hb_codepoint_t new_gid = plan->new_to_old_gid_list[i].first; contour_point_vector_t *all_points; if (!plan->new_gid_contour_points_map.has (new_gid, &all_points)) return false; - if (!glyph_variations[i].instantiate (plan->axes_location, plan->axes_triple_distances, all_points)) + if (!glyph_variations[i].instantiate (plan->axes_location, plan->axes_triple_distances, all_points, iup_optimize)) return false; } return true; @@ -340,7 +349,8 @@ struct gvar const glyph_variations_t& glyph_vars, Iterator it, unsigned axis_count, - unsigned num_glyphs) const + unsigned num_glyphs, + bool force_long_offsets) const { TRACE_SERIALIZE (this); gvar *out = c->allocate_min<gvar> (); @@ -352,7 +362,7 @@ struct gvar out->glyphCountX = hb_min (0xFFFFu, num_glyphs); unsigned glyph_var_data_size = glyph_vars.compiled_byte_size (); - bool long_offset = glyph_var_data_size & ~0xFFFFu; + bool long_offset = glyph_var_data_size & ~0xFFFFu || force_long_offsets; out->flags = long_offset ? 1 : 0; HBUINT8 *glyph_var_data_offsets = c->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false); @@ -393,7 +403,12 @@ struct gvar unsigned axis_count = c->plan->axes_index_map.get_population (); unsigned num_glyphs = c->plan->num_output_glyphs (); auto it = hb_iter (c->plan->new_to_old_gid_list); - return_trace (serialize (c->serializer, glyph_vars, it, axis_count, num_glyphs)); + + bool force_long_offsets = false; +#ifdef HB_EXPERIMENTAL_API + force_long_offsets = c->plan->flags & HB_SUBSET_FLAGS_IFTB_REQUIREMENTS; +#endif + return_trace (serialize (c->serializer, glyph_vars, it, axis_count, num_glyphs, force_long_offsets)); } bool subset (hb_subset_context_t *c) const @@ -429,7 +444,7 @@ struct gvar } bool long_offset = (subset_data_size & ~0xFFFFu); - #ifdef HB_EXPERIMENTAL_API +#ifdef HB_EXPERIMENTAL_API long_offset = long_offset || (c->plan->flags & HB_SUBSET_FLAGS_IFTB_REQUIREMENTS); #endif out->flags = long_offset ? 
1 : 0; diff --git a/gfx/harfbuzz/src/hb-ot-var-hvar-table.hh b/gfx/harfbuzz/src/hb-ot-var-hvar-table.hh index 53a4642d38..33a4e1a40e 100644 --- a/gfx/harfbuzz/src/hb-ot-var-hvar-table.hh +++ b/gfx/harfbuzz/src/hb-ot-var-hvar-table.hh @@ -188,7 +188,7 @@ struct hvarvvar_subset_plan_t ~hvarvvar_subset_plan_t() { fini (); } void init (const hb_array_t<const DeltaSetIndexMap *> &index_maps, - const VariationStore &_var_store, + const ItemVariationStore &_var_store, const hb_subset_plan_t *plan) { index_map_plans.resize (index_maps.length); @@ -263,7 +263,7 @@ struct hvarvvar_subset_plan_t hb_inc_bimap_t outer_map; hb_vector_t<hb_inc_bimap_t> inner_maps; hb_vector_t<index_map_subset_plan_t> index_map_plans; - const VariationStore *var_store; + const ItemVariationStore *var_store; protected: hb_vector_t<hb_set_t *> inner_sets; @@ -296,7 +296,7 @@ struct HVARVVAR rsbMap.sanitize (c, this)); } - const VariationStore& get_var_store () const + const ItemVariationStore& get_var_store () const { return this+varStore; } void listup_index_maps (hb_vector_t<const DeltaSetIndexMap *> &index_maps) const @@ -384,7 +384,7 @@ struct HVARVVAR float get_advance_delta_unscaled (hb_codepoint_t glyph, const int *coords, unsigned int coord_count, - VariationStore::cache_t *store_cache = nullptr) const + ItemVariationStore::cache_t *store_cache = nullptr) const { uint32_t varidx = (this+advMap).map (glyph); return (this+varStore).get_delta (varidx, @@ -405,7 +405,7 @@ struct HVARVVAR public: FixedVersion<>version; /* Version of the metrics variation table * initially set to 0x00010000u */ - Offset32To<VariationStore> + Offset32To<ItemVariationStore> varStore; /* Offset to item variation store table. */ Offset32To<DeltaSetIndexMap> advMap; /* Offset to advance var-idx mapping. */ diff --git a/gfx/harfbuzz/src/hb-ot-var-mvar-table.hh b/gfx/harfbuzz/src/hb-ot-var-mvar-table.hh index 6d69777618..1f0401d1d3 100644 --- a/gfx/harfbuzz/src/hb-ot-var-mvar-table.hh +++ b/gfx/harfbuzz/src/hb-ot-var-mvar-table.hh @@ -56,7 +56,7 @@ struct VariationValueRecord public: Tag valueTag; /* Four-byte tag identifying a font-wide measure. */ - VarIdx varIdx; /* Outer/inner index into VariationStore item. */ + VarIdx varIdx; /* Outer/inner index into ItemVariationStore item. */ public: DEFINE_SIZE_STATIC (8); @@ -106,7 +106,7 @@ struct MVAR out->valueRecordCount = valueRecordCount; item_variations_t item_vars; - const VariationStore& src_var_store = this+varStore; + const ItemVariationStore& src_var_store = this+varStore; if (!item_vars.instantiate (src_var_store, c->plan)) return_trace (false); @@ -159,7 +159,7 @@ protected: HBUINT16 valueRecordSize;/* The size in bytes of each value record — * must be greater than zero. */ HBUINT16 valueRecordCount;/* The number of value records — may be zero. */ - Offset16To<VariationStore> + Offset16To<ItemVariationStore> varStore; /* Offset to item variation store table. */ UnsizedArrayOf<HBUINT8> valuesZ; /* Array of value records. 
The records must be diff --git a/gfx/harfbuzz/src/hb-priority-queue.hh b/gfx/harfbuzz/src/hb-priority-queue.hh index 9b962a29d9..274d5df4c5 100644 --- a/gfx/harfbuzz/src/hb-priority-queue.hh +++ b/gfx/harfbuzz/src/hb-priority-queue.hh @@ -163,7 +163,7 @@ struct hb_priority_queue_t goto repeat; } - void swap (unsigned a, unsigned b) + void swap (unsigned a, unsigned b) noexcept { assert (a < heap.length); assert (b < heap.length); diff --git a/gfx/harfbuzz/src/hb-repacker.hh b/gfx/harfbuzz/src/hb-repacker.hh index e9cd376ad3..ed40f271cc 100644 --- a/gfx/harfbuzz/src/hb-repacker.hh +++ b/gfx/harfbuzz/src/hb-repacker.hh @@ -239,6 +239,54 @@ bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& over } static inline +bool _resolve_shared_overflow(const hb_vector_t<graph::overflow_record_t>& overflows, + int overflow_index, + graph_t& sorted_graph) +{ + const graph::overflow_record_t& r = overflows[overflow_index]; + + // Find all of the parents in overflowing links that link to this + // same child node. We will then try duplicating the child node and + // re-assigning all of these parents to the duplicate. + hb_set_t parents; + parents.add(r.parent); + for (int i = overflow_index - 1; i >= 0; i--) { + const graph::overflow_record_t& r2 = overflows[i]; + if (r2.child == r.child) { + parents.add(r2.parent); + } + } + + unsigned result = sorted_graph.duplicate(&parents, r.child); + if (result == (unsigned) -1 && parents.get_population() > 2) { + // All links to the child are overflowing, so we can't include all + // in the duplication. Remove one parent from the duplication. + // Remove the lowest index parent, which will be the closest to the child. + parents.del(parents.get_min()); + result = sorted_graph.duplicate(&parents, r.child); + } + + if (result == (unsigned) -1) return result; + + if (parents.get_population() > 1) { + // If the duplicated node has more than one parent pre-emptively raise it's priority to the maximum. + // This will place it close to the parents. Node's with only one parent, don't need this as normal overflow + // resolution will raise priority if needed. + // + // Reasoning: most of the parents to this child are likely at the same layer in the graph. Duplicating + // the child will theoretically allow it to be placed closer to it's parents. However, due to the shortest + // distance sort by default it's placement will remain in the same layer, thus it will remain in roughly the + // same position (and distance from parents) as the original child node. The overflow resolution will attempt + // to move nodes closer, but only for non-shared nodes. Since this node is shared, it will simply be given + // further duplication which defeats the attempt to duplicate with multiple parents. To fix this we + // pre-emptively raise priority now which allows the duplicated node to pack into the same layer as it's parents. + sorted_graph.vertices_[result].give_max_priority(); + } + + return result; +} + +static inline bool _process_overflows (const hb_vector_t<graph::overflow_record_t>& overflows, hb_set_t& priority_bumped_parents, graph_t& sorted_graph) @@ -254,7 +302,7 @@ bool _process_overflows (const hb_vector_t<graph::overflow_record_t>& overflows, { // The child object is shared, we may be able to eliminate the overflow // by duplicating it. 
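A standalone sketch of the parent-collection step _resolve_shared_overflow performs, using std:: containers instead of graph_t and hb_set_t; collect_overflowing_parents is an illustrative name. The real code then duplicates the child for that whole parent set, drops the lowest-index parent and retries if duplication fails with more than two parents, and raises the duplicate's priority when it keeps multiple parents so it packs close to them.

```cpp
#include <set>
#include <vector>

struct overflow_rec { unsigned parent, child; };

// Gather every earlier overflow record that points at the same child as the
// record at overflow_index; all of those parents get redirected to the duplicate.
static std::set<unsigned> collect_overflowing_parents (const std::vector<overflow_rec>& overflows,
                                                       size_t overflow_index)
{
  std::set<unsigned> parents;
  parents.insert (overflows[overflow_index].parent);
  for (size_t i = overflow_index; i-- > 0; )
    if (overflows[i].child == overflows[overflow_index].child)
      parents.insert (overflows[i].parent);
  return parents;
}
```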
- if (sorted_graph.duplicate (r.parent, r.child) == (unsigned) -1) continue; + if (!_resolve_shared_overflow(overflows, i, sorted_graph)) continue; return true; } @@ -388,7 +436,7 @@ template<typename T> inline hb_blob_t* hb_resolve_overflows (const T& packed, hb_tag_t table_tag, - unsigned max_rounds = 20, + unsigned max_rounds = 32, bool recalculate_extensions = false) { graph_t sorted_graph (packed); if (sorted_graph.in_error ()) diff --git a/gfx/harfbuzz/src/hb-serialize.hh b/gfx/harfbuzz/src/hb-serialize.hh index 15eccb6a09..73634e6b93 100644 --- a/gfx/harfbuzz/src/hb-serialize.hh +++ b/gfx/harfbuzz/src/hb-serialize.hh @@ -91,7 +91,7 @@ struct hb_serialize_context_t } #endif - friend void swap (object_t& a, object_t& b) + friend void swap (object_t& a, object_t& b) noexcept { hb_swap (a.head, b.head); hb_swap (a.tail, b.tail); @@ -156,9 +156,9 @@ struct hb_serialize_context_t object_t *next; auto all_links () const HB_AUTO_RETURN - (( hb_concat (this->real_links, this->virtual_links) )); + (( hb_concat (real_links, virtual_links) )); auto all_links_writer () HB_AUTO_RETURN - (( hb_concat (this->real_links.writer (), this->virtual_links.writer ()) )); + (( hb_concat (real_links.writer (), virtual_links.writer ()) )); }; struct snapshot_t diff --git a/gfx/harfbuzz/src/hb-set.hh b/gfx/harfbuzz/src/hb-set.hh index ff2a170d2d..ce69ea2c9b 100644 --- a/gfx/harfbuzz/src/hb-set.hh +++ b/gfx/harfbuzz/src/hb-set.hh @@ -44,10 +44,10 @@ struct hb_sparseset_t ~hb_sparseset_t () { fini (); } hb_sparseset_t (const hb_sparseset_t& other) : hb_sparseset_t () { set (other); } - hb_sparseset_t (hb_sparseset_t&& other) : hb_sparseset_t () { s = std::move (other.s); } + hb_sparseset_t (hb_sparseset_t&& other) noexcept : hb_sparseset_t () { s = std::move (other.s); } hb_sparseset_t& operator = (const hb_sparseset_t& other) { set (other); return *this; } - hb_sparseset_t& operator = (hb_sparseset_t&& other) { s = std::move (other.s); return *this; } - friend void swap (hb_sparseset_t& a, hb_sparseset_t& b) { hb_swap (a.s, b.s); } + hb_sparseset_t& operator = (hb_sparseset_t&& other) noexcept { s = std::move (other.s); return *this; } + friend void swap (hb_sparseset_t& a, hb_sparseset_t& b) noexcept { hb_swap (a.s, b.s); } hb_sparseset_t (std::initializer_list<hb_codepoint_t> lst) : hb_sparseset_t () { @@ -166,7 +166,7 @@ struct hb_set_t : hb_sparseset_t<hb_bit_set_invertible_t> ~hb_set_t () = default; hb_set_t () : sparseset () {}; hb_set_t (const hb_set_t &o) : sparseset ((sparseset &) o) {}; - hb_set_t (hb_set_t&& o) : sparseset (std::move ((sparseset &) o)) {} + hb_set_t (hb_set_t&& o) noexcept : sparseset (std::move ((sparseset &) o)) {} hb_set_t& operator = (const hb_set_t&) = default; hb_set_t& operator = (hb_set_t&&) = default; hb_set_t (std::initializer_list<hb_codepoint_t> lst) : sparseset (lst) {} diff --git a/gfx/harfbuzz/src/hb-subset-cff2.cc b/gfx/harfbuzz/src/hb-subset-cff2.cc index abc108e571..eb5cb0c625 100644 --- a/gfx/harfbuzz/src/hb-subset-cff2.cc +++ b/gfx/harfbuzz/src/hb-subset-cff2.cc @@ -248,7 +248,7 @@ struct cff2_subr_subsetter_t : subr_subsetter_t<cff2_subr_subsetter_t, CFF2Subrs struct cff2_private_blend_encoder_param_t { cff2_private_blend_encoder_param_t (hb_serialize_context_t *c, - const CFF2VariationStore *varStore, + const CFF2ItemVariationStore *varStore, hb_array_t<int> normalized_coords) : c (c), varStore (varStore), normalized_coords (normalized_coords) {} @@ -284,7 +284,7 @@ struct cff2_private_blend_encoder_param_t unsigned ivs = 0; unsigned region_count = 0; 
hb_vector_t<float> scalars; - const CFF2VariationStore *varStore = nullptr; + const CFF2ItemVariationStore *varStore = nullptr; hb_array_t<int> normalized_coords; }; @@ -378,7 +378,7 @@ struct cff2_private_dict_blend_opset_t : dict_opset_t struct cff2_private_dict_op_serializer_t : op_serializer_t { cff2_private_dict_op_serializer_t (bool desubroutinize_, bool drop_hints_, bool pinned_, - const CFF::CFF2VariationStore* varStore_, + const CFF::CFF2ItemVariationStore* varStore_, hb_array_t<int> normalized_coords_) : desubroutinize (desubroutinize_), drop_hints (drop_hints_), pinned (pinned_), varStore (varStore_), normalized_coords (normalized_coords_) {} @@ -416,7 +416,7 @@ struct cff2_private_dict_op_serializer_t : op_serializer_t const bool desubroutinize; const bool drop_hints; const bool pinned; - const CFF::CFF2VariationStore* varStore; + const CFF::CFF2ItemVariationStore* varStore; hb_array_t<int> normalized_coords; }; @@ -628,10 +628,10 @@ OT::cff2::accelerator_subset_t::serialize (hb_serialize_context_t *c, } /* variation store */ - if (varStore != &Null (CFF2VariationStore) && + if (varStore != &Null (CFF2ItemVariationStore) && !plan.pinned) { - auto *dest = c->push<CFF2VariationStore> (); + auto *dest = c->push<CFF2ItemVariationStore> (); if (unlikely (!dest->serialize (c, varStore))) { c->pop_discard (); diff --git a/gfx/harfbuzz/src/hb-subset-input.cc b/gfx/harfbuzz/src/hb-subset-input.cc index 1e0a89a630..68a3e77788 100644 --- a/gfx/harfbuzz/src/hb-subset-input.cc +++ b/gfx/harfbuzz/src/hb-subset-input.cc @@ -24,6 +24,7 @@ * Google Author(s): Garret Rieger, Rod Sheeter, Behdad Esfahbod */ +#include "hb-subset-instancer-solver.hh" #include "hb-subset.hh" #include "hb-set.hh" #include "hb-utf.hh" @@ -50,7 +51,6 @@ hb_subset_input_t::hb_subset_input_t () HB_TAG ('k', 'e', 'r', 'n'), // Copied from fontTools: - HB_TAG ('B', 'A', 'S', 'E'), HB_TAG ('J', 'S', 'T', 'F'), HB_TAG ('D', 'S', 'I', 'G'), HB_TAG ('E', 'B', 'D', 'T'), @@ -418,6 +418,46 @@ hb_subset_input_keep_everything (hb_subset_input_t *input) #ifndef HB_NO_VAR /** + * hb_subset_input_pin_all_axes_to_default: (skip) + * @input: a #hb_subset_input_t object. + * @face: a #hb_face_t object. + * + * Pin all axes to default locations in the given subset input object. + * + * All axes in a font must be pinned. Additionally, `CFF2` table, if present, + * will be de-subroutinized. + * + * Return value: `true` if success, `false` otherwise + * + * Since: 8.3.1 + **/ +HB_EXTERN hb_bool_t +hb_subset_input_pin_all_axes_to_default (hb_subset_input_t *input, + hb_face_t *face) +{ + unsigned axis_count = hb_ot_var_get_axis_count (face); + if (!axis_count) return false; + + hb_ot_var_axis_info_t *axis_infos = (hb_ot_var_axis_info_t *) hb_calloc (axis_count, sizeof (hb_ot_var_axis_info_t)); + if (unlikely (!axis_infos)) return false; + + (void) hb_ot_var_get_axis_infos (face, 0, &axis_count, axis_infos); + + for (unsigned i = 0; i < axis_count; i++) + { + hb_tag_t axis_tag = axis_infos[i].tag; + float default_val = axis_infos[i].default_value; + if (!input->axes_location.set (axis_tag, Triple (default_val, default_val, default_val))) + { + hb_free (axis_infos); + return false; + } + } + hb_free (axis_infos); + return true; +} + +/** * hb_subset_input_pin_axis_to_default: (skip) * @input: a #hb_subset_input_t object. * @face: a #hb_face_t object. @@ -481,16 +521,13 @@ hb_subset_input_pin_axis_location (hb_subset_input_t *input, * @input: a #hb_subset_input_t object. * @face: a #hb_face_t object. 
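A hedged usage sketch for the new hb_subset_input_pin_all_axes_to_default entry point; make_static_instance is an illustrative helper, not a HarfBuzz symbol, and error handling is kept minimal.

```cpp
#include <hb-subset.h>

static hb_blob_t *make_static_instance (hb_face_t *face)
{
  hb_subset_input_t *input = hb_subset_input_create_or_fail ();
  if (!input) return nullptr;

  // Pin every fvar axis to its default location. Per the API note above, all
  // axes must be pinned, and a CFF2 table, if present, is de-subroutinized.
  if (!hb_subset_input_pin_all_axes_to_default (input, face))
  {
    hb_subset_input_destroy (input);
    return nullptr;
  }

  hb_face_t *instance = hb_subset_or_fail (face, input);
  hb_subset_input_destroy (input);
  if (!instance) return nullptr;

  hb_blob_t *blob = hb_face_reference_blob (instance);
  hb_face_destroy (instance);
  return blob;
}
```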
* @axis_tag: Tag of the axis - * @axis_min_value: Minimum value of the axis variation range to set - * @axis_max_value: Maximum value of the axis variation range to set - * @axis_def_value: Default value of the axis variation range to set, in case of - * null, it'll be determined automatically + * @axis_min_value: Minimum value of the axis variation range to set, if NaN the existing min will be used. + * @axis_max_value: Maximum value of the axis variation range to set if NaN the existing max will be used. + * @axis_def_value: Default value of the axis variation range to set, if NaN the existing default will be used. * * Restricting the range of variation on an axis in the given subset input object. * New min/default/max values will be clamped if they're not within the fvar axis range. - * If the new default value is null: - * If the fvar axis default value is within the new range, then new default - * value is the same as original default value. + * * If the fvar axis default value is not within the new range, the new default * value will be changed to the new min or max value, whichever is closer to the fvar * axis default. @@ -509,21 +546,57 @@ hb_subset_input_set_axis_range (hb_subset_input_t *input, hb_tag_t axis_tag, float axis_min_value, float axis_max_value, - float *axis_def_value /* IN, maybe NULL */) + float axis_def_value) { - if (axis_min_value > axis_max_value) - return false; - hb_ot_var_axis_info_t axis_info; if (!hb_ot_var_find_axis_info (face, axis_tag, &axis_info)) return false; - float new_min_val = hb_clamp(axis_min_value, axis_info.min_value, axis_info.max_value); - float new_max_val = hb_clamp(axis_max_value, axis_info.min_value, axis_info.max_value); - float new_default_val = axis_def_value ? *axis_def_value : axis_info.default_value; - new_default_val = hb_clamp(new_default_val, new_min_val, new_max_val); + float min = !std::isnan(axis_min_value) ? axis_min_value : axis_info.min_value; + float max = !std::isnan(axis_max_value) ? axis_max_value : axis_info.max_value; + float def = !std::isnan(axis_def_value) ? axis_def_value : axis_info.default_value; + + if (min > max) + return false; + + float new_min_val = hb_clamp(min, axis_info.min_value, axis_info.max_value); + float new_max_val = hb_clamp(max, axis_info.min_value, axis_info.max_value); + float new_default_val = hb_clamp(def, new_min_val, new_max_val); return input->axes_location.set (axis_tag, Triple (new_min_val, new_default_val, new_max_val)); } + +/** + * hb_subset_input_get_axis_range: (skip) + * @input: a #hb_subset_input_t object. + * @axis_tag: Tag of the axis + * @axis_min_value: Set to the previously configured minimum value of the axis variation range. + * @axis_max_value: Set to the previously configured maximum value of the axis variation range. + * @axis_def_value: Set to the previously configured default value of the axis variation range. + * + * Gets the axis range assigned by previous calls to hb_subset_input_set_axis_range. + * + * Return value: `true` if a range has been set for this axis tag, `false` otherwise. 
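A hedged sketch of driving the revised hb_subset_input_set_axis_range signature, where passing NaN for min, max or default keeps the corresponding fvar value, together with the experimental hb_subset_input_get_axis_range getter (only present in builds with HB_EXPERIMENTAL_API); restrict_weight is an illustrative helper name.

```cpp
#include <cmath>
#include <hb-subset.h>

static bool restrict_weight (hb_subset_input_t *input, hb_face_t *face)
{
  // Limit wght to [400, 700]; NAN keeps the font's own default, clamped into
  // the new range if it happens to fall outside it.
  if (!hb_subset_input_set_axis_range (input, face, HB_TAG ('w','g','h','t'),
                                       400.0f, 700.0f, NAN))
    return false;

#ifdef HB_EXPERIMENTAL_API
  float min_v, max_v, def_v;
  if (hb_subset_input_get_axis_range (input, HB_TAG ('w','g','h','t'),
                                      &min_v, &max_v, &def_v))
  {
    // min_v/max_v/def_v now hold the clamped triple stored on the input.
  }
#endif
  return true;
}
```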
+ * + * XSince: EXPERIMENTAL + **/ +HB_EXTERN hb_bool_t +hb_subset_input_get_axis_range (hb_subset_input_t *input, + hb_tag_t axis_tag, + float *axis_min_value, + float *axis_max_value, + float *axis_def_value) + +{ + Triple* triple; + if (!input->axes_location.has(axis_tag, &triple)) { + return false; + } + + *axis_min_value = triple->minimum; + *axis_def_value = triple->middle; + *axis_max_value = triple->maximum; + return true; +} #endif #endif diff --git a/gfx/harfbuzz/src/hb-subset-instancer-iup.cc b/gfx/harfbuzz/src/hb-subset-instancer-iup.cc new file mode 100644 index 0000000000..35a964d082 --- /dev/null +++ b/gfx/harfbuzz/src/hb-subset-instancer-iup.cc @@ -0,0 +1,532 @@ +/* + * Copyright © 2024 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + */ + +#include "hb-subset-instancer-iup.hh" + +/* This file is a straight port of the following: + * + * https://github.com/fonttools/fonttools/blob/main/Lib/fontTools/varLib/iup.py + * + * Where that file returns optimzied deltas vector, we return optimized + * referenced point indices. 
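Before the port itself, a minimal restatement of the per-axis interpolation rule IUP relies on, mirroring the _iup_segment helper further down in this file: a point lying between two explicit reference points gets its delta inferred from its coordinate (clamped at the ends), and it may be dropped from the encoding only when that inferred delta stays within the error tolerance.

```cpp
static float iup_infer_delta (float x,            // coordinate of the point
                              float x1, float d1, // first reference point
                              float x2, float d2) // second reference point
{
  if (x1 == x2) return d1 == d2 ? d1 : 0.f;  // degenerate span
  if (x1 > x2) { float t = x1; x1 = x2; x2 = t; t = d1; d1 = d2; d2 = t; }
  if (x <= x1) return d1;
  if (x >= x2) return d2;
  return d1 + (x - x1) * (d2 - d1) / (x2 - x1);
}
```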
+ */ + +constexpr static unsigned MAX_LOOKBACK = 8; + +static void _iup_contour_bound_forced_set (const hb_array_t<const contour_point_t> contour_points, + const hb_array_t<const int> x_deltas, + const hb_array_t<const int> y_deltas, + hb_set_t& forced_set, /* OUT */ + float tolerance = 0.f) +{ + unsigned len = contour_points.length; + unsigned next_i = 0; + for (int i = len - 1; i >= 0; i--) + { + unsigned last_i = (len + i -1) % len; + for (unsigned j = 0; j < 2; j++) + { + float cj, lcj, ncj; + int dj, ldj, ndj; + if (j == 0) + { + cj = contour_points.arrayZ[i].x; + dj = x_deltas.arrayZ[i]; + lcj = contour_points.arrayZ[last_i].x; + ldj = x_deltas.arrayZ[last_i]; + ncj = contour_points.arrayZ[next_i].x; + ndj = x_deltas.arrayZ[next_i]; + } + else + { + cj = contour_points.arrayZ[i].y; + dj = y_deltas.arrayZ[i]; + lcj = contour_points.arrayZ[last_i].y; + ldj = y_deltas.arrayZ[last_i]; + ncj = contour_points.arrayZ[next_i].y; + ndj = y_deltas.arrayZ[next_i]; + } + + float c1, c2; + int d1, d2; + if (lcj <= ncj) + { + c1 = lcj; + c2 = ncj; + d1 = ldj; + d2 = ndj; + } + else + { + c1 = ncj; + c2 = lcj; + d1 = ndj; + d2 = ldj; + } + + bool force = false; + if (c1 == c2) + { + if (abs (d1 - d2) > tolerance && abs (dj) > tolerance) + force = true; + } + else if (c1 <= cj && cj <= c2) + { + if (!(hb_min (d1, d2) - tolerance <= dj && + dj <= hb_max (d1, d2) + tolerance)) + force = true; + } + else + { + if (d1 != d2) + { + if (cj < c1) + { + if (abs (dj) > tolerance && + abs (dj - d1) > tolerance && + ((dj - tolerance < d1) != (d1 < d2))) + force = true; + } + else + { + if (abs (dj) > tolerance && + abs (dj - d2) > tolerance && + ((d2 < dj + tolerance) != (d1 < d2))) + force = true; + } + } + } + + if (force) + { + forced_set.add (i); + break; + } + } + next_i = i; + } +} + +template <typename T, + hb_enable_if (hb_is_trivially_copyable (T))> +static bool rotate_array (const hb_array_t<const T>& org_array, + int k, + hb_vector_t<T>& out) +{ + unsigned n = org_array.length; + if (!n) return true; + if (unlikely (!out.resize (n, false))) + return false; + + unsigned item_size = hb_static_size (T); + if (k < 0) + k = n - (-k) % n; + else + k %= n; + + hb_memcpy ((void *) out.arrayZ, (const void *) (org_array.arrayZ + n - k), k * item_size); + hb_memcpy ((void *) (out.arrayZ + k), (const void *) org_array.arrayZ, (n - k) * item_size); + return true; +} + +static bool rotate_set (const hb_set_t& org_set, + int k, + unsigned n, + hb_set_t& out) +{ + if (!n) return false; + k %= n; + if (k < 0) + k = n + k; + + if (k == 0) + { + out.set (org_set); + } + else + { + for (auto v : org_set) + out.add ((v + k) % n); + } + return !out.in_error (); +} + +/* Given two reference coordinates (start and end of contour_points array), + * output interpolated deltas for points in between */ +static bool _iup_segment (const hb_array_t<const contour_point_t> contour_points, + const hb_array_t<const int> x_deltas, + const hb_array_t<const int> y_deltas, + const contour_point_t& p1, const contour_point_t& p2, + int p1_dx, int p2_dx, + int p1_dy, int p2_dy, + hb_vector_t<float>& interp_x_deltas, /* OUT */ + hb_vector_t<float>& interp_y_deltas /* OUT */) +{ + unsigned n = contour_points.length; + if (unlikely (!interp_x_deltas.resize (n, false) || + !interp_y_deltas.resize (n, false))) + return false; + + for (unsigned j = 0; j < 2; j++) + { + float x1, x2, d1, d2; + float *out; + if (j == 0) + { + x1 = p1.x; + x2 = p2.x; + d1 = p1_dx; + d2 = p2_dx; + out = interp_x_deltas.arrayZ; + } + else + { + x1 = p1.y; + x2 = p2.y; 
+ d1 = p1_dy; + d2 = p2_dy; + out = interp_y_deltas.arrayZ; + } + + if (x1 == x2) + { + if (d1 == d2) + { + for (unsigned i = 0; i < n; i++) + out[i] = d1; + } + else + { + for (unsigned i = 0; i < n; i++) + out[i] = 0.f; + } + continue; + } + + if (x1 > x2) + { + hb_swap (x1, x2); + hb_swap (d1, d2); + } + + float scale = (d2 - d1) / (x2 - x1); + for (unsigned i = 0; i < n; i++) + { + float x = j == 0 ? contour_points.arrayZ[i].x : contour_points.arrayZ[i].y; + float d; + if (x <= x1) + d = d1; + else if (x >= x2) + d = d2; + else + d = d1 + (x - x1) * scale; + + out[i] = d; + } + } + return true; +} + +static bool _can_iup_in_between (const hb_array_t<const contour_point_t> contour_points, + const hb_array_t<const int> x_deltas, + const hb_array_t<const int> y_deltas, + const contour_point_t& p1, const contour_point_t& p2, + int p1_dx, int p2_dx, + int p1_dy, int p2_dy, + float tolerance) +{ + hb_vector_t<float> interp_x_deltas, interp_y_deltas; + if (!_iup_segment (contour_points, x_deltas, y_deltas, + p1, p2, p1_dx, p2_dx, p1_dy, p2_dy, + interp_x_deltas, interp_y_deltas)) + return false; + + unsigned num = contour_points.length; + + for (unsigned i = 0; i < num; i++) + { + float dx = x_deltas.arrayZ[i] - interp_x_deltas.arrayZ[i]; + float dy = y_deltas.arrayZ[i] - interp_y_deltas.arrayZ[i]; + + if (sqrtf ((float)dx * dx + (float)dy * dy) > tolerance) + return false; + } + return true; +} + +static bool _iup_contour_optimize_dp (const contour_point_vector_t& contour_points, + const hb_vector_t<int>& x_deltas, + const hb_vector_t<int>& y_deltas, + const hb_set_t& forced_set, + float tolerance, + unsigned lookback, + hb_vector_t<unsigned>& costs, /* OUT */ + hb_vector_t<int>& chain /* OUT */) +{ + unsigned n = contour_points.length; + if (unlikely (!costs.resize (n, false) || + !chain.resize (n, false))) + return false; + + lookback = hb_min (lookback, MAX_LOOKBACK); + + for (unsigned i = 0; i < n; i++) + { + unsigned best_cost = (i == 0 ? 1 : costs.arrayZ[i-1] + 1); + + costs.arrayZ[i] = best_cost; + chain.arrayZ[i] = (i == 0 ? -1 : i - 1); + + if (i > 0 && forced_set.has (i - 1)) + continue; + + int lookback_index = hb_max ((int) i - (int) lookback + 1, -1); + for (int j = i - 2; j >= lookback_index; j--) + { + unsigned cost = j == -1 ? 1 : costs.arrayZ[j] + 1; + /* num points between i and j */ + unsigned num_points = i - j - 1; + unsigned p1 = (j == -1 ? 
n - 1 : j); + if (cost < best_cost && + _can_iup_in_between (contour_points.as_array ().sub_array (j + 1, num_points), + x_deltas.as_array ().sub_array (j + 1, num_points), + y_deltas.as_array ().sub_array (j + 1, num_points), + contour_points.arrayZ[p1], contour_points.arrayZ[i], + x_deltas.arrayZ[p1], x_deltas.arrayZ[i], + y_deltas.arrayZ[p1], y_deltas.arrayZ[i], + tolerance)) + { + best_cost = cost; + costs.arrayZ[i] = best_cost; + chain.arrayZ[i] = j; + } + + if (j > 0 && forced_set.has (j)) + break; + } + } + return true; +} + +static bool _iup_contour_optimize (const hb_array_t<const contour_point_t> contour_points, + const hb_array_t<const int> x_deltas, + const hb_array_t<const int> y_deltas, + hb_array_t<bool> opt_indices, /* OUT */ + float tolerance = 0.f) +{ + unsigned n = contour_points.length; + if (opt_indices.length != n || + x_deltas.length != n || + y_deltas.length != n) + return false; + + bool all_within_tolerance = true; + for (unsigned i = 0; i < n; i++) + { + int dx = x_deltas.arrayZ[i]; + int dy = y_deltas.arrayZ[i]; + if (sqrtf ((float)dx * dx + (float)dy * dy) > tolerance) + { + all_within_tolerance = false; + break; + } + } + + /* If all are within tolerance distance, do nothing, opt_indices is + * initilized to false */ + if (all_within_tolerance) + return true; + + /* If there's exactly one point, return it */ + if (n == 1) + { + opt_indices.arrayZ[0] = true; + return true; + } + + /* If all deltas are exactly the same, return just one (the first one) */ + bool all_deltas_are_equal = true; + for (unsigned i = 1; i < n; i++) + if (x_deltas.arrayZ[i] != x_deltas.arrayZ[0] || + y_deltas.arrayZ[i] != y_deltas.arrayZ[0]) + { + all_deltas_are_equal = false; + break; + } + + if (all_deltas_are_equal) + { + opt_indices.arrayZ[0] = true; + return true; + } + + /* else, solve the general problem using Dynamic Programming */ + hb_set_t forced_set; + _iup_contour_bound_forced_set (contour_points, x_deltas, y_deltas, forced_set, tolerance); + + if (!forced_set.is_empty ()) + { + int k = n - 1 - forced_set.get_max (); + if (k < 0) + return false; + + hb_vector_t<int> rot_x_deltas, rot_y_deltas; + contour_point_vector_t rot_points; + hb_set_t rot_forced_set; + if (!rotate_array (contour_points, k, rot_points) || + !rotate_array (x_deltas, k, rot_x_deltas) || + !rotate_array (y_deltas, k, rot_y_deltas) || + !rotate_set (forced_set, k, n, rot_forced_set)) + return false; + + hb_vector_t<unsigned> costs; + hb_vector_t<int> chain; + + if (!_iup_contour_optimize_dp (rot_points, rot_x_deltas, rot_y_deltas, + rot_forced_set, tolerance, n, + costs, chain)) + return false; + + hb_set_t solution; + int index = n - 1; + while (index != -1) + { + solution.add (index); + index = chain.arrayZ[index]; + } + + if (solution.is_empty () || + forced_set.get_population () > solution.get_population ()) + return false; + + for (unsigned i : solution) + opt_indices.arrayZ[i] = true; + + hb_vector_t<bool> rot_indices; + const hb_array_t<const bool> opt_indices_array (opt_indices.arrayZ, opt_indices.length); + rotate_array (opt_indices_array, -k, rot_indices); + + for (unsigned i = 0; i < n; i++) + opt_indices.arrayZ[i] = rot_indices.arrayZ[i]; + } + else + { + hb_vector_t<int> repeat_x_deltas, repeat_y_deltas; + contour_point_vector_t repeat_points; + + if (unlikely (!repeat_x_deltas.resize (n * 2, false) || + !repeat_y_deltas.resize (n * 2, false) || + !repeat_points.resize (n * 2, false))) + return false; + + unsigned contour_point_size = hb_static_size (contour_point_t); + for (unsigned i = 0; i < 
n; i++) + { + hb_memcpy ((void *) repeat_x_deltas.arrayZ, (const void *) x_deltas.arrayZ, n * sizeof (float)); + hb_memcpy ((void *) (repeat_x_deltas.arrayZ + n), (const void *) x_deltas.arrayZ, n * sizeof (float)); + + hb_memcpy ((void *) repeat_y_deltas.arrayZ, (const void *) y_deltas.arrayZ, n * sizeof (float)); + hb_memcpy ((void *) (repeat_y_deltas.arrayZ + n), (const void *) y_deltas.arrayZ, n * sizeof (float)); + + hb_memcpy ((void *) repeat_points.arrayZ, (const void *) contour_points.arrayZ, n * contour_point_size); + hb_memcpy ((void *) (repeat_points.arrayZ + n), (const void *) contour_points.arrayZ, n * contour_point_size); + } + + hb_vector_t<unsigned> costs; + hb_vector_t<int> chain; + if (!_iup_contour_optimize_dp (repeat_points, repeat_x_deltas, repeat_y_deltas, + forced_set, tolerance, n, + costs, chain)) + return false; + + unsigned best_cost = n + 1; + int len = costs.length; + hb_set_t best_sol; + for (int start = n - 1; start < len; start++) + { + hb_set_t solution; + int i = start; + int lookback = start - (int) n; + while (i > lookback) + { + solution.add (i % n); + i = chain.arrayZ[i]; + } + if (i == lookback) + { + unsigned cost_i = i < 0 ? 0 : costs.arrayZ[i]; + unsigned cost = costs.arrayZ[start] - cost_i; + if (cost <= best_cost) + { + best_sol.set (solution); + best_cost = cost; + } + } + } + + for (unsigned i = 0; i < n; i++) + if (best_sol.has (i)) + opt_indices.arrayZ[i] = true; + } + return true; +} + +bool iup_delta_optimize (const contour_point_vector_t& contour_points, + const hb_vector_t<int>& x_deltas, + const hb_vector_t<int>& y_deltas, + hb_vector_t<bool>& opt_indices, /* OUT */ + float tolerance) +{ + if (!opt_indices.resize (contour_points.length)) + return false; + + hb_vector_t<unsigned> end_points; + unsigned count = contour_points.length; + if (unlikely (!end_points.alloc (count))) + return false; + + for (unsigned i = 0; i < count - 4; i++) + if (contour_points.arrayZ[i].is_end_point) + end_points.push (i); + + /* phantom points */ + for (unsigned i = count - 4; i < count; i++) + end_points.push (i); + + if (end_points.in_error ()) return false; + + unsigned start = 0; + for (unsigned end : end_points) + { + unsigned len = end - start + 1; + if (!_iup_contour_optimize (contour_points.as_array ().sub_array (start, len), + x_deltas.as_array ().sub_array (start, len), + y_deltas.as_array ().sub_array (start, len), + opt_indices.as_array ().sub_array (start, len), + tolerance)) + return false; + start = end + 1; + } + return true; +} diff --git a/gfx/harfbuzz/src/hb-subset-instancer-iup.hh b/gfx/harfbuzz/src/hb-subset-instancer-iup.hh new file mode 100644 index 0000000000..7eac5935a4 --- /dev/null +++ b/gfx/harfbuzz/src/hb-subset-instancer-iup.hh @@ -0,0 +1,37 @@ +/* + * Copyright © 2024 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + */ + +#ifndef HB_SUBSET_INSTANCER_IUP_HH +#define HB_SUBSET_INSTANCER_IUP_HH + +#include "hb-subset-plan.hh" +/* given contour points and deltas, optimize a set of referenced points within error + * tolerance. Returns optimized referenced point indices */ +HB_INTERNAL bool iup_delta_optimize (const contour_point_vector_t& contour_points, + const hb_vector_t<int>& x_deltas, + const hb_vector_t<int>& y_deltas, + hb_vector_t<bool>& opt_indices, /* OUT */ + float tolerance = 0.f); + +#endif /* HB_SUBSET_INSTANCER_IUP_HH */ diff --git a/gfx/harfbuzz/src/hb-subset-instancer-solver.cc b/gfx/harfbuzz/src/hb-subset-instancer-solver.cc index 4876bc4379..70783c0a0d 100644 --- a/gfx/harfbuzz/src/hb-subset-instancer-solver.cc +++ b/gfx/harfbuzz/src/hb-subset-instancer-solver.cc @@ -256,7 +256,10 @@ _solve (Triple tent, Triple axisLimit, bool negative = false) */ float newUpper = peak + (1 - gain) * (upper - peak); assert (axisMax <= newUpper); // Because outGain > gain - if (newUpper <= axisDef + (axisMax - axisDef) * 2) + /* Disabled because ots doesn't like us: + * https://github.com/fonttools/fonttools/issues/3350 */ + + if (false && (newUpper <= axisDef + (axisMax - axisDef) * 2)) { upper = newUpper; if (!negative && axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper) diff --git a/gfx/harfbuzz/src/hb-subset-plan-member-list.hh b/gfx/harfbuzz/src/hb-subset-plan-member-list.hh index 71da80e387..74416b92f9 100644 --- a/gfx/harfbuzz/src/hb-subset-plan-member-list.hh +++ b/gfx/harfbuzz/src/hb-subset-plan-member-list.hh @@ -140,6 +140,15 @@ HB_SUBSET_PLAN_MEMBER (mutable hb_vector_t<unsigned>, bounds_height_vec) //map: new_gid -> contour points vector HB_SUBSET_PLAN_MEMBER (mutable hb_hashmap_t E(<hb_codepoint_t, contour_point_vector_t>), new_gid_contour_points_map) +//new gids set for composite glyphs +HB_SUBSET_PLAN_MEMBER (hb_set_t, composite_new_gids) + +//Old BASE item variation index -> (New varidx, 0) mapping +HB_SUBSET_PLAN_MEMBER (hb_hashmap_t E(<unsigned, hb_pair_t E(<unsigned, int>)>), base_variation_idx_map) + +//BASE table varstore retained varidx mapping +HB_SUBSET_PLAN_MEMBER (hb_vector_t<hb_inc_bimap_t>, base_varstore_inner_maps) + #ifdef HB_EXPERIMENTAL_API // name table overrides map: hb_ot_name_record_ids_t-> name string new value or // None to indicate should remove diff --git a/gfx/harfbuzz/src/hb-subset-plan.cc b/gfx/harfbuzz/src/hb-subset-plan.cc index 5786223196..068fddaedd 100644 --- a/gfx/harfbuzz/src/hb-subset-plan.cc +++ b/gfx/harfbuzz/src/hb-subset-plan.cc @@ -32,6 +32,7 @@ #include "hb-ot-cmap-table.hh" #include "hb-ot-glyf-table.hh" +#include "hb-ot-layout-base-table.hh" #include "hb-ot-layout-gdef-table.hh" #include "hb-ot-layout-gpos-table.hh" #include "hb-ot-layout-gsub-table.hh" @@ -431,6 +432,52 @@ _collect_layout_variation_indices (hb_subset_plan_t* plan) gdef.destroy (); gpos.destroy (); } + +#ifndef HB_NO_BASE +/* used by BASE table only, delta is always set to 0 in the output map */ +static inline void +_remap_variation_indices (const hb_set_t& indices, + unsigned subtable_count, + hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>>& variation_idx_delta_map /* OUT */) +{ + unsigned new_major = 0, 
new_minor = 0; + unsigned last_major = (indices.get_min ()) >> 16; + for (unsigned idx : indices) + { + uint16_t major = idx >> 16; + if (major >= subtable_count) break; + if (major != last_major) + { + new_minor = 0; + ++new_major; + } + + unsigned new_idx = (new_major << 16) + new_minor; + variation_idx_delta_map.set (idx, hb_pair_t<unsigned, int> (new_idx, 0)); + ++new_minor; + last_major = major; + } +} + +static inline void +_collect_base_variation_indices (hb_subset_plan_t* plan) +{ + hb_blob_ptr_t<OT::BASE> base = plan->source_table<OT::BASE> (); + if (!base->has_var_store ()) + { + base.destroy (); + return; + } + + hb_set_t varidx_set; + base->collect_variation_indices (plan, varidx_set); + unsigned subtable_count = base->get_var_store ().get_sub_table_count (); + base.destroy (); + + _remap_variation_indices (varidx_set, subtable_count, plan->base_variation_idx_map); + _generate_varstore_inner_maps (varidx_set, subtable_count, plan->base_varstore_inner_maps); +} +#endif #endif static inline void @@ -994,8 +1041,8 @@ _update_instance_metrics_map_from_cff2 (hb_subset_plan_t *plan) OT::cff2::accelerator_t cff2 (plan->source); if (!cff2.is_valid ()) return; - hb_font_t *font = nullptr; - if (unlikely (!plan->check_success (font = _get_hb_font_with_variations (plan)))) + hb_font_t *font = _get_hb_font_with_variations (plan); + if (unlikely (!plan->check_success (font != nullptr))) { hb_font_destroy (font); return; @@ -1073,8 +1120,8 @@ _update_instance_metrics_map_from_cff2 (hb_subset_plan_t *plan) static bool _get_instance_glyphs_contour_points (hb_subset_plan_t *plan) { - /* contour_points vector only needed for updating gvar table (infer delta) - * during partial instancing */ + /* contour_points vector only needed for updating gvar table (infer delta and + * iup delta optimization) during partial instancing */ if (plan->user_axes_location.is_empty () || plan->all_axes_pinned) return true; @@ -1092,10 +1139,17 @@ _get_instance_glyphs_contour_points (hb_subset_plan_t *plan) } hb_codepoint_t old_gid = _.second; - if (unlikely (!glyf.glyph_for_gid (old_gid).get_all_points_without_var (plan->source, all_points))) + auto glyph = glyf.glyph_for_gid (old_gid); + if (unlikely (!glyph.get_all_points_without_var (plan->source, all_points))) return false; if (unlikely (!plan->new_gid_contour_points_map.set (new_gid, all_points))) return false; + +#ifdef HB_EXPERIMENTAL_API + /* composite new gids are only needed by iup delta optimization */ + if ((plan->flags & HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS) && glyph.is_composite ()) + plan->composite_new_gids.add (new_gid); +#endif } return true; } @@ -1205,6 +1259,13 @@ hb_subset_plan_t::hb_subset_plan_t (hb_face_t *face, if (!drop_tables.has (HB_OT_TAG_GDEF)) _remap_used_mark_sets (this, used_mark_sets_map); +#ifndef HB_NO_VAR +#ifndef HB_NO_BASE + if (!drop_tables.has (HB_OT_TAG_BASE)) + _collect_base_variation_indices (this); +#endif +#endif + if (unlikely (in_error ())) return; diff --git a/gfx/harfbuzz/src/hb-subset-plan.hh b/gfx/harfbuzz/src/hb-subset-plan.hh index 1f19a58c1e..19a9fa6918 100644 --- a/gfx/harfbuzz/src/hb-subset-plan.hh +++ b/gfx/harfbuzz/src/hb-subset-plan.hh @@ -78,6 +78,13 @@ struct contour_point_t y = x * matrix[1] + y * matrix[3]; x = x_; } + + void add_delta (float delta_x, float delta_y) + { + x += delta_x; + y += delta_y; + } + HB_ALWAYS_INLINE void translate (const contour_point_t &p) { x += p.x; y += p.y; } @@ -99,6 +106,22 @@ struct contour_point_vector_t : hb_vector_t<contour_point_t> unsigned count = a.length; 
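A small worked illustration of _remap_variation_indices above, using std:: containers and ignoring the subtable_count cutoff: retained BASE variation indices are renumbered densely per (major, minor) pair, and the delta half of each mapping entry stays pinned to 0.

```cpp
// Example: kept indices (major << 16 | minor) 0x00000002, 0x00000007, 0x00010003
// remap to                                    0x00000000, 0x00000001, 0x00010000
#include <cstdint>
#include <map>
#include <set>

static std::map<uint32_t, uint32_t> remap_varidx (const std::set<uint32_t>& kept)
{
  std::map<uint32_t, uint32_t> out;
  uint32_t new_major = 0, new_minor = 0, last_major = 0;
  bool first = true;
  for (uint32_t idx : kept)
  {
    uint32_t major = idx >> 16;
    if (!first && major != last_major) { ++new_major; new_minor = 0; }
    out[idx] = (new_major << 16) | new_minor++;
    last_major = major;
    first = false;
  }
  return out;
}
```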
hb_memcpy (arrayZ, a.arrayZ, count * sizeof (arrayZ[0])); } + + bool add_deltas (const hb_vector_t<float> deltas_x, + const hb_vector_t<float> deltas_y, + const hb_vector_t<bool> indices) + { + if (indices.length != deltas_x.length || + indices.length != deltas_y.length) + return false; + + for (unsigned i = 0; i < indices.length; i++) + { + if (!indices.arrayZ[i]) continue; + arrayZ[i].add_delta (deltas_x.arrayZ[i], deltas_y.arrayZ[i]); + } + return true; + } }; namespace OT { @@ -147,7 +170,7 @@ struct hb_subset_plan_t bool gsub_insert_catch_all_feature_variation_rec; bool gpos_insert_catch_all_feature_variation_rec; - // whether GDEF VarStore is retained + // whether GDEF ItemVariationStore is retained mutable bool has_gdef_varstore; #define HB_SUBSET_PLAN_MEMBER(Type, Name) Type Name; diff --git a/gfx/harfbuzz/src/hb-subset.cc b/gfx/harfbuzz/src/hb-subset.cc index 06e77dd8eb..f10ef54dbd 100644 --- a/gfx/harfbuzz/src/hb-subset.cc +++ b/gfx/harfbuzz/src/hb-subset.cc @@ -48,6 +48,7 @@ #include "hb-ot-cff2-table.hh" #include "hb-ot-vorg-table.hh" #include "hb-ot-name-table.hh" +#include "hb-ot-layout-base-table.hh" #include "hb-ot-layout-gsub-table.hh" #include "hb-ot-layout-gpos-table.hh" #include "hb-ot-var-avar-table.hh" @@ -503,6 +504,7 @@ _subset_table (hb_subset_plan_t *plan, case HB_OT_TAG_CBLC: return _subset<const OT::CBLC> (plan, buf); case HB_OT_TAG_CBDT: return true; /* skip CBDT, handled by CBLC */ case HB_OT_TAG_MATH: return _subset<const OT::MATH> (plan, buf); + case HB_OT_TAG_BASE: return _subset<const OT::BASE> (plan, buf); #ifndef HB_NO_SUBSET_CFF case HB_OT_TAG_CFF1: return _subset<const OT::cff1> (plan, buf); @@ -548,6 +550,7 @@ _subset_table (hb_subset_plan_t *plan, } #endif return _passthrough (plan, tag); + default: if (plan->flags & HB_SUBSET_FLAGS_PASSTHROUGH_UNRECOGNIZED) return _passthrough (plan, tag); diff --git a/gfx/harfbuzz/src/hb-subset.h b/gfx/harfbuzz/src/hb-subset.h index d79e7f762a..73dcae4660 100644 --- a/gfx/harfbuzz/src/hb-subset.h +++ b/gfx/harfbuzz/src/hb-subset.h @@ -76,6 +76,8 @@ typedef struct hb_subset_plan_t hb_subset_plan_t; * @HB_SUBSET_FLAGS_IFTB_REQUIREMENTS: If set enforce requirements on the output subset * to allow it to be used with incremental font transfer IFTB patches. Primarily, * this forces all outline data to use long (32 bit) offsets. Since: EXPERIMENTAL + * @HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS: If set perform IUP delta optimization on the + * remaining gvar table's deltas. Since: EXPERIMENTAL * * List of boolean properties that can be configured on the subset input. 
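A hedged sketch of enabling the new HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS flag during partial instancing of a multi-axis font; it only has an effect in builds compiled with HB_EXPERIMENTAL_API, and instance_weight is an illustrative helper name.

```cpp
#include <hb-subset.h>

static hb_face_t *instance_weight (hb_face_t *face, float wght)
{
  hb_subset_input_t *input = hb_subset_input_create_or_fail ();
  if (!input) return nullptr;

  // Pin wght only; any remaining axes keep their gvar deltas, which is where
  // the IUP optimization can shrink the data.
  if (!hb_subset_input_pin_axis_location (input, face, HB_TAG ('w','g','h','t'), wght))
  {
    hb_subset_input_destroy (input);
    return nullptr;
  }

#ifdef HB_EXPERIMENTAL_API
  hb_subset_input_set_flags (input,
                             hb_subset_input_get_flags (input) |
                             HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS);
#endif

  hb_face_t *result = hb_subset_or_fail (face, input);
  hb_subset_input_destroy (input);
  return result;
}
```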
* @@ -95,6 +97,7 @@ typedef enum { /*< flags >*/ HB_SUBSET_FLAGS_NO_LAYOUT_CLOSURE = 0x00000200u, #ifdef HB_EXPERIMENTAL_API HB_SUBSET_FLAGS_IFTB_REQUIREMENTS = 0x00000400u, + HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS = 0x00000800u, #endif } hb_subset_flags_t; @@ -171,6 +174,10 @@ hb_subset_input_set_flags (hb_subset_input_t *input, unsigned value); HB_EXTERN hb_bool_t +hb_subset_input_pin_all_axes_to_default (hb_subset_input_t *input, + hb_face_t *face); + +HB_EXTERN hb_bool_t hb_subset_input_pin_axis_to_default (hb_subset_input_t *input, hb_face_t *face, hb_tag_t axis_tag); @@ -183,12 +190,19 @@ hb_subset_input_pin_axis_location (hb_subset_input_t *input, #ifdef HB_EXPERIMENTAL_API HB_EXTERN hb_bool_t +hb_subset_input_get_axis_range (hb_subset_input_t *input, + hb_tag_t axis_tag, + float *axis_min_value, + float *axis_max_value, + float *axis_def_value); + +HB_EXTERN hb_bool_t hb_subset_input_set_axis_range (hb_subset_input_t *input, hb_face_t *face, hb_tag_t axis_tag, float axis_min_value, float axis_max_value, - float *axis_def_value); + float axis_def_value); HB_EXTERN hb_bool_t hb_subset_input_override_name_table (hb_subset_input_t *input, diff --git a/gfx/harfbuzz/src/hb-vector.hh b/gfx/harfbuzz/src/hb-vector.hh index dfe1b7d1c7..c0cc7063ff 100644 --- a/gfx/harfbuzz/src/hb-vector.hh +++ b/gfx/harfbuzz/src/hb-vector.hh @@ -78,7 +78,7 @@ struct hb_vector_t if (unlikely (in_error ())) return; copy_array (o); } - hb_vector_t (hb_vector_t &&o) + hb_vector_t (hb_vector_t &&o) noexcept { allocated = o.allocated; length = o.length; @@ -122,7 +122,7 @@ struct hb_vector_t resize (0); } - friend void swap (hb_vector_t& a, hb_vector_t& b) + friend void swap (hb_vector_t& a, hb_vector_t& b) noexcept { hb_swap (a.allocated, b.allocated); hb_swap (a.length, b.length); @@ -139,7 +139,7 @@ struct hb_vector_t return *this; } - hb_vector_t& operator = (hb_vector_t &&o) + hb_vector_t& operator = (hb_vector_t &&o) noexcept { hb_swap (*this, o); return *this; diff --git a/gfx/harfbuzz/src/hb-version.h b/gfx/harfbuzz/src/hb-version.h index b08dd1f09f..d90e36391c 100644 --- a/gfx/harfbuzz/src/hb-version.h +++ b/gfx/harfbuzz/src/hb-version.h @@ -53,14 +53,14 @@ HB_BEGIN_DECLS * * The micro component of the library version available at compile-time. */ -#define HB_VERSION_MICRO 0 +#define HB_VERSION_MICRO 1 /** * HB_VERSION_STRING: * * A string literal containing the library version available at compile-time. 
*/ -#define HB_VERSION_STRING "8.3.0" +#define HB_VERSION_STRING "8.3.1" /** * HB_VERSION_ATLEAST: diff --git a/gfx/harfbuzz/src/hb.hh b/gfx/harfbuzz/src/hb.hh index 972608d6a3..0ceeb99f50 100644 --- a/gfx/harfbuzz/src/hb.hh +++ b/gfx/harfbuzz/src/hb.hh @@ -64,6 +64,7 @@ #pragma GCC diagnostic error "-Wbitwise-instead-of-logical" #pragma GCC diagnostic error "-Wcast-align" #pragma GCC diagnostic error "-Wcast-function-type" +#pragma GCC diagnostic error "-Wcast-function-type-strict" #pragma GCC diagnostic error "-Wconstant-conversion" #pragma GCC diagnostic error "-Wcomma" #pragma GCC diagnostic error "-Wdelete-non-virtual-dtor" @@ -177,6 +178,11 @@ #define HB_EXTERN __declspec (dllexport) extern #endif +// https://github.com/harfbuzz/harfbuzz/pull/4619 +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif + #include "hb.h" #define HB_H_IN #include "hb-ot.h" @@ -212,6 +218,12 @@ #include <winapifamily.h> #endif +#ifndef PRId32 +# define PRId32 "d" +# define PRIu32 "u" +# define PRIx32 "x" +#endif + #define HB_PASTE1(a,b) a##b #define HB_PASTE(a,b) HB_PASTE1(a,b) diff --git a/gfx/harfbuzz/src/meson.build b/gfx/harfbuzz/src/meson.build index 77c7e75017..daa9198058 100644 --- a/gfx/harfbuzz/src/meson.build +++ b/gfx/harfbuzz/src/meson.build @@ -371,6 +371,8 @@ hb_subset_sources = files( 'hb-subset-cff2.cc', 'hb-subset-input.cc', 'hb-subset-input.hh', + 'hb-subset-instancer-iup.hh', + 'hb-subset-instancer-iup.cc', 'hb-subset-instancer-solver.hh', 'hb-subset-instancer-solver.cc', 'hb-subset-plan.cc', @@ -680,7 +682,8 @@ if conf.get('HAVE_CAIRO', 0) == 1 endif if get_option('tests').enabled() - # TODO: MSVC gives the following, + # TODO: Microsoft LINK gives the following because extern, non dllexport + # symbols can only be used when linking against a static library # error LNK2019: unresolved external symbol "unsigned __int64 const * const _hb_NullPool" if cpp.get_define('_MSC_FULL_VER') == '' noinst_programs = { @@ -722,13 +725,13 @@ if get_option('tests').enabled() 'test-repacker': ['test-repacker.cc', 'hb-static.cc', 'graph/gsubgpos-context.cc'], 'test-instancer-solver': ['test-subset-instancer-solver.cc', 'hb-subset-instancer-solver.cc', 'hb-static.cc'], 'test-priority-queue': ['test-priority-queue.cc', 'hb-static.cc'], - 'test-tuple-varstore': ['test-tuple-varstore.cc', 'hb-subset-instancer-solver.cc', 'hb-static.cc'], - 'test-item-varstore': ['test-item-varstore.cc', 'hb-subset-instancer-solver.cc', 'hb-static.cc'], + 'test-tuple-varstore': ['test-tuple-varstore.cc', 'hb-subset-instancer-solver.cc', 'hb-subset-instancer-iup.cc', 'hb-static.cc'], + 'test-item-varstore': ['test-item-varstore.cc', 'hb-subset-instancer-solver.cc', 'hb-subset-instancer-iup.cc', 'hb-static.cc'], 'test-unicode-ranges': ['test-unicode-ranges.cc'], } foreach name, source : compiled_tests - if cpp.get_argument_syntax() == 'msvc' and source.contains('hb-static.cc') - # TODO: MSVC doesn't like tests having hb-static.cc, fix them + if cpp.get_define('_MSC_FULL_VER') != '' and source.contains('hb-static.cc') + # TODO: Microsoft compilers cannot link tests using hb-static.cc, fix them continue endif test(name, executable(name, source, diff --git a/gfx/harfbuzz/src/moz.build b/gfx/harfbuzz/src/moz.build index 7944026c27..864c67db14 100644 --- a/gfx/harfbuzz/src/moz.build +++ b/gfx/harfbuzz/src/moz.build @@ -86,6 +86,7 @@ UNIFIED_SOURCES += [ 'hb-shaper.cc', 'hb-static.cc', 'hb-style.cc', + 'hb-subset-instancer-iup.cc', 'hb-unicode.cc', 'hb-wasm-api.cc', 'hb-wasm-shape.cc', diff --git 
a/gfx/ipc/CanvasRenderThread.cpp b/gfx/ipc/CanvasRenderThread.cpp index 853806a222..53a87b1cb9 100644 --- a/gfx/ipc/CanvasRenderThread.cpp +++ b/gfx/ipc/CanvasRenderThread.cpp @@ -152,6 +152,9 @@ void CanvasRenderThread::Shutdown() { // This closes all of the IPDL actors with possibly active task queues. CanvasManagerParent::Shutdown(); + // Queue any remaining global cleanup for CanvasTranslator + layers::CanvasTranslator::Shutdown(); + // Any task queues that are in the process of shutting down are tracked in // mPendingShutdownTaskQueues. We need to block on each one until all events // are flushed so that we can safely teardown RemoteTextureMap afterwards. diff --git a/gfx/layers/CanvasDrawEventRecorder.cpp b/gfx/layers/CanvasDrawEventRecorder.cpp index af143bf5cb..6140fc4ba7 100644 --- a/gfx/layers/CanvasDrawEventRecorder.cpp +++ b/gfx/layers/CanvasDrawEventRecorder.cpp @@ -127,6 +127,7 @@ int64_t CanvasDrawEventRecorder::CreateCheckpoint() { int64_t checkpoint = mHeader->eventCount; RecordEvent(RecordedCheckpoint()); ClearProcessedExternalSurfaces(); + ClearProcessedExternalImages(); return checkpoint; } @@ -276,6 +277,7 @@ void CanvasDrawEventRecorder::DropFreeBuffers() { } ClearProcessedExternalSurfaces(); + ClearProcessedExternalImages(); } void CanvasDrawEventRecorder::IncrementEventCount() { @@ -444,6 +446,16 @@ void CanvasDrawEventRecorder::StoreSourceSurfaceRecording( DrawEventRecorderPrivate::StoreSourceSurfaceRecording(aSurface, aReason); } +void CanvasDrawEventRecorder::StoreImageRecording( + const RefPtr<Image>& aImageOfSurfaceDescriptor, const char* aReasony) { + NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder); + + StoreExternalImageRecording(aImageOfSurfaceDescriptor); + mExternalImages.back().mEventCount = mHeader->eventCount; + + ClearProcessedExternalImages(); +} + void CanvasDrawEventRecorder::ClearProcessedExternalSurfaces() { while (!mExternalSurfaces.empty()) { if (mExternalSurfaces.front().mEventCount > mHeader->processedCount) { @@ -453,5 +465,14 @@ void CanvasDrawEventRecorder::ClearProcessedExternalSurfaces() { } } +void CanvasDrawEventRecorder::ClearProcessedExternalImages() { + while (!mExternalImages.empty()) { + if (mExternalImages.front().mEventCount > mHeader->processedCount) { + break; + } + mExternalImages.pop_front(); + } +} + } // namespace layers } // namespace mozilla diff --git a/gfx/layers/CanvasDrawEventRecorder.h b/gfx/layers/CanvasDrawEventRecorder.h index c9eacf27ac..aa95eec3e6 100644 --- a/gfx/layers/CanvasDrawEventRecorder.h +++ b/gfx/layers/CanvasDrawEventRecorder.h @@ -111,6 +111,9 @@ class CanvasDrawEventRecorder final : public gfx::DrawEventRecorderPrivate, void StoreSourceSurfaceRecording(gfx::SourceSurface* aSurface, const char* aReason) final; + void StoreImageRecording(const RefPtr<Image>& aImageOfSurfaceDescriptor, + const char* aReasony) final; + gfx::RecorderType GetRecorderType() const override { return gfx::RecorderType::CANVAS; } @@ -134,6 +137,8 @@ class CanvasDrawEventRecorder final : public gfx::DrawEventRecorderPrivate, void ClearProcessedExternalSurfaces(); + void ClearProcessedExternalImages(); + protected: gfx::ContiguousBuffer& GetContiguousBuffer(size_t aSize) final; diff --git a/gfx/layers/FrameMetrics.h b/gfx/layers/FrameMetrics.h index 5d13d36703..c79a088e53 100644 --- a/gfx/layers/FrameMetrics.h +++ b/gfx/layers/FrameMetrics.h @@ -700,7 +700,7 @@ MOZ_DEFINE_ENUM_CLASS_WITH_BASE( std::ostream& operator<<(std::ostream& aStream, const OverscrollBehavior& aBehavior); -struct OverscrollBehaviorInfo { +struct 
OverscrollBehaviorInfo final { OverscrollBehaviorInfo(); // Construct from StyleOverscrollBehavior values. @@ -711,6 +711,8 @@ struct OverscrollBehaviorInfo { friend std::ostream& operator<<(std::ostream& aStream, const OverscrollBehaviorInfo& aInfo); + auto MutTiedFields() { return std::tie(mBehaviorX, mBehaviorY); } + OverscrollBehavior mBehaviorX; OverscrollBehavior mBehaviorY; }; diff --git a/gfx/layers/LayersTypes.h b/gfx/layers/LayersTypes.h index 340ea76fa5..c02851d81f 100644 --- a/gfx/layers/LayersTypes.h +++ b/gfx/layers/LayersTypes.h @@ -46,9 +46,11 @@ class TextureHost; #undef NONE #undef OPAQUE -struct LayersId { +struct LayersId final { uint64_t mId = 0; + auto MutTiedFields() { return std::tie(mId); } + bool IsValid() const { return mId != 0; } // Allow explicit cast to a uint64_t for now @@ -75,9 +77,11 @@ struct LayersId { }; template <typename T> -struct BaseTransactionId { +struct BaseTransactionId final { uint64_t mId = 0; + auto MutTiedFields() { return std::tie(mId); } + bool IsValid() const { return mId != 0; } [[nodiscard]] BaseTransactionId<T> Next() const { diff --git a/gfx/layers/NativeLayerCA.h b/gfx/layers/NativeLayerCA.h index b41ac36c23..93b6b3a6de 100644 --- a/gfx/layers/NativeLayerCA.h +++ b/gfx/layers/NativeLayerCA.h @@ -143,7 +143,7 @@ class NativeLayerRootCA : public NativeLayerRoot { void SetWindowIsFullscreen(bool aFullscreen); - VideoLowPowerType CheckVideoLowPower(); + VideoLowPowerType CheckVideoLowPower(const MutexAutoLock& aProofOfLock); protected: explicit NativeLayerRootCA(CALayer* aLayer); @@ -335,8 +335,7 @@ class NativeLayerCA : public NativeLayer { Maybe<SurfaceWithInvalidRegion> GetUnusedSurfaceAndCleanUp( const MutexAutoLock& aProofOfLock); - bool IsVideo(); - bool IsVideoAndLocked(const MutexAutoLock& aProofOfLock); + bool IsVideo(const MutexAutoLock& aProofOfLock); bool ShouldSpecializeVideo(const MutexAutoLock& aProofOfLock); bool HasExtent() const { return mHasExtent; } void SetHasExtent(bool aHasExtent) { mHasExtent = aHasExtent; } @@ -484,6 +483,7 @@ class NativeLayerCA : public NativeLayer { bool mSpecializeVideo = false; bool mHasExtent = false; bool mIsDRM = false; + bool mIsTextureHostVideo = false; #ifdef NIGHTLY_BUILD // Track the consistency of our caller's API usage. 
Layers that are drawn diff --git a/gfx/layers/NativeLayerCA.mm b/gfx/layers/NativeLayerCA.mm index 42a889184e..a983e3a45e 100644 --- a/gfx/layers/NativeLayerCA.mm +++ b/gfx/layers/NativeLayerCA.mm @@ -330,37 +330,38 @@ bool NativeLayerRootCA::CommitToScreen() { mWindowIsFullscreen); mCommitPending = false; - } - if (StaticPrefs::gfx_webrender_debug_dump_native_layer_tree_to_file()) { - static uint32_t sFrameID = 0; - uint32_t frameID = sFrameID++; - - NSString* dirPath = - [NSString stringWithFormat:@"%@/Desktop/nativelayerdumps-%d", - NSHomeDirectory(), getpid()]; - if ([NSFileManager.defaultManager createDirectoryAtPath:dirPath - withIntermediateDirectories:YES - attributes:nil - error:nullptr]) { - NSString* filename = - [NSString stringWithFormat:@"frame-%d.html", frameID]; - NSString* filePath = [dirPath stringByAppendingPathComponent:filename]; - DumpLayerTreeToFile([filePath UTF8String]); - } else { - NSLog(@"Failed to create directory %@", dirPath); + if (StaticPrefs::gfx_webrender_debug_dump_native_layer_tree_to_file()) { + static uint32_t sFrameID = 0; + uint32_t frameID = sFrameID++; + + NSString* dirPath = + [NSString stringWithFormat:@"%@/Desktop/nativelayerdumps-%d", + NSHomeDirectory(), getpid()]; + if ([NSFileManager.defaultManager createDirectoryAtPath:dirPath + withIntermediateDirectories:YES + attributes:nil + error:nullptr]) { + NSString* filename = + [NSString stringWithFormat:@"frame-%d.html", frameID]; + NSString* filePath = [dirPath stringByAppendingPathComponent:filename]; + DumpLayerTreeToFile([filePath UTF8String]); + } else { + NSLog(@"Failed to create directory %@", dirPath); + } } - } - // Decide if we are going to emit telemetry about video low power on this - // commit. - static const int32_t TELEMETRY_COMMIT_PERIOD = - StaticPrefs::gfx_core_animation_low_power_telemetry_frames_AtStartup(); - mTelemetryCommitCount = (mTelemetryCommitCount + 1) % TELEMETRY_COMMIT_PERIOD; - if (mTelemetryCommitCount == 0) { - // Figure out if we are hitting video low power mode. - VideoLowPowerType videoLowPower = CheckVideoLowPower(); - EmitTelemetryForVideoLowPower(videoLowPower); + // Decide if we are going to emit telemetry about video low power on this + // commit. + static const int32_t TELEMETRY_COMMIT_PERIOD = + StaticPrefs::gfx_core_animation_low_power_telemetry_frames_AtStartup(); + mTelemetryCommitCount = + (mTelemetryCommitCount + 1) % TELEMETRY_COMMIT_PERIOD; + if (mTelemetryCommitCount == 0) { + // Figure out if we are hitting video low power mode. + VideoLowPowerType videoLowPower = CheckVideoLowPower(lock); + EmitTelemetryForVideoLowPower(videoLowPower); + } } return true; @@ -579,7 +580,8 @@ void NativeLayerRootCA::SetWindowIsFullscreen(bool aFullscreen) { return components[componentCount - 1] >= 1.0f; } -VideoLowPowerType NativeLayerRootCA::CheckVideoLowPower() { +VideoLowPowerType NativeLayerRootCA::CheckVideoLowPower( + const MutexAutoLock& aProofOfLock) { // This deteremines whether the current layer contents qualify for the // macOS Core Animation video low power mode. 
Those requirements are // summarized at @@ -609,7 +611,7 @@ VideoLowPowerType NativeLayerRootCA::CheckVideoLowPower() { secondCALayer = topCALayer; topCALayer = topLayer->UnderlyingCALayer(WhichRepresentation::ONSCREEN); - topLayerIsVideo = topLayer->IsVideo(); + topLayerIsVideo = topLayer->IsVideo(aProofOfLock); if (topLayerIsVideo) { ++videoLayerCount; } @@ -835,9 +837,8 @@ NativeLayerCA::NativeLayerCA(bool aIsOpaque) mIsOpaque(aIsOpaque) { #ifdef NIGHTLY_BUILD if (StaticPrefs::gfx_core_animation_specialize_video_log()) { - NSLog(@"VIDEO_LOG: NativeLayerCA: %p is being created to host video, which " - @"will force a video " - @"layer rebuild.", + NSLog(@"VIDEO_LOG: NativeLayerCA: %p is being created to host an external " + @"image, which may force a video layer rebuild.", this); } #endif @@ -864,7 +865,7 @@ NativeLayerCA::~NativeLayerCA() { if (mHasEverAttachExternalImage && StaticPrefs::gfx_core_animation_specialize_video_log()) { NSLog(@"VIDEO_LOG: ~NativeLayerCA: %p is being destroyed after hosting " - @"video.", + @"an external image.", this); } #endif @@ -902,6 +903,9 @@ void NativeLayerCA::AttachExternalImage(wr::RenderTextureHost* aExternalImage) { return; } + // Determine if TextureHost is a video surface. + mIsTextureHostVideo = gfx::Info(mTextureHost->GetFormat())->isYuv; + gfx::IntSize oldSize = mSize; mSize = texture->GetSize(0); bool changedSizeAndDisplayRect = (mSize != oldSize); @@ -933,18 +937,15 @@ void NativeLayerCA::AttachExternalImage(wr::RenderTextureHost* aExternalImage) { }); } -bool NativeLayerCA::IsVideo() { - // Anything with a texture host is considered a video source. - return mTextureHost; -} - -bool NativeLayerCA::IsVideoAndLocked(const MutexAutoLock& aProofOfLock) { - // Anything with a texture host is considered a video source. - return mTextureHost; +bool NativeLayerCA::IsVideo(const MutexAutoLock& aProofOfLock) { + // If we have a texture host, we've checked to see if it's providing video. + // And if we don't have a texture host, it isn't video, so we just check + // the value we've computed. + return mIsTextureHostVideo; } bool NativeLayerCA::ShouldSpecializeVideo(const MutexAutoLock& aProofOfLock) { - if (!IsVideoAndLocked(aProofOfLock)) { + if (!IsVideo(aProofOfLock)) { // Only videos are eligible. 
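The IsVideo and CheckVideoLowPower signature changes above follow the proof-of-lock idiom used throughout this patch (see also the aProofOfMapLock parameters in the APZ changes below): a method that must only run with a mutex held takes a const MutexAutoLock& so callers cannot forget to lock first. A generic sketch of the idiom, with illustrative class and member names that are not the real NativeLayerCA members:

// Sketch of the proof-of-lock idiom (illustrative names only).
#include "mozilla/Mutex.h"

class VideoFlagHolder {
 public:
  // May only be called while mMutex is held; requiring the lock reference
  // documents that and makes it hard to call without locking.
  bool IsVideo(const mozilla::MutexAutoLock& aProofOfLock) const {
    return mIsVideo;
  }

  bool IsVideoThreadSafe() {
    mozilla::MutexAutoLock lock(mMutex);  // take the lock...
    return IsVideo(lock);                 // ...and pass it as proof
  }

 private:
  mutable mozilla::Mutex mMutex{"VideoFlagHolder"};
  bool mIsVideo = false;
};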
return false; } @@ -1410,6 +1411,8 @@ void NativeLayerCA::NotifySurfaceReady() { mInProgressSurface, "NotifySurfaceReady called without preceding call to NextSurface"); + mIsTextureHostVideo = false; + if (mInProgressLockedIOSurface) { mInProgressLockedIOSurface->Unlock(false); mInProgressLockedIOSurface = nullptr; @@ -1465,7 +1468,7 @@ void NativeLayerCA::ForAllRepresentations(F aFn) { NativeLayerCA::UpdateType NativeLayerCA::HasUpdate( WhichRepresentation aRepresentation) { MutexAutoLock lock(mMutex); - return GetRepresentation(aRepresentation).HasUpdate(IsVideoAndLocked(lock)); + return GetRepresentation(aRepresentation).HasUpdate(IsVideo(lock)); } /* static */ @@ -1510,7 +1513,7 @@ bool NativeLayerCA::ApplyChanges(WhichRepresentation aRepresentation, .ApplyChanges(aUpdate, mSize, mIsOpaque, mPosition, mTransform, mDisplayRect, mClipRect, mBackingScale, mSurfaceIsFlipped, mSamplingFilter, mSpecializeVideo, surface, mColor, mIsDRM, - IsVideo()); + IsVideo(lock)); } CALayer* NativeLayerCA::UnderlyingCALayer(WhichRepresentation aRepresentation) { diff --git a/gfx/layers/RemoteTextureMap.cpp b/gfx/layers/RemoteTextureMap.cpp index 3fe3b13deb..95db676651 100644 --- a/gfx/layers/RemoteTextureMap.cpp +++ b/gfx/layers/RemoteTextureMap.cpp @@ -19,6 +19,7 @@ #include "mozilla/layers/RemoteTextureHostWrapper.h" #include "mozilla/layers/TextureClientSharedSurface.h" #include "mozilla/layers/WebRenderTextureHost.h" +#include "mozilla/StaticPrefs_gfx.h" #include "mozilla/StaticPrefs_webgl.h" #include "mozilla/webgpu/ExternalTexture.h" #include "mozilla/webrender/RenderThread.h" @@ -334,7 +335,9 @@ bool RemoteTextureMap::RecycleTexture( // Recycle texture data recycled.mTextureData = std::move(aHolder.mTextureData); } - aRecycleBin->mRecycledTextures.push_back(std::move(recycled)); + if (!StaticPrefs::gfx_remote_texture_recycle_disabled()) { + aRecycleBin->mRecycledTextures.push_back(std::move(recycled)); + } return true; } diff --git a/gfx/layers/apz/public/APZPublicUtils.h b/gfx/layers/apz/public/APZPublicUtils.h index 6433008b4c..47a07c8fd6 100644 --- a/gfx/layers/apz/public/APZPublicUtils.h +++ b/gfx/layers/apz/public/APZPublicUtils.h @@ -55,9 +55,8 @@ const ScreenMargin CalculatePendingDisplayPort( * between 1 and 8 inclusive. The multiplier is chosen based on the provided * base size, such that multiplier is larger when the base size is larger. * The exact details are somewhat arbitrary and tuned by hand. - * This function is intended to only be used with WebRender, because that is - * the codepath that wants to use a larger displayport alignment, because - * moving the displayport is relatively expensive with WebRender. + * We use a large displayport alignment because moving the displayport is + * relatively expensive with WebRender. */ gfx::IntSize GetDisplayportAlignmentMultiplier(const ScreenSize& aBaseSize); diff --git a/gfx/layers/apz/public/GeckoContentControllerTypes.h b/gfx/layers/apz/public/GeckoContentControllerTypes.h index 8ab478eab5..8616455137 100644 --- a/gfx/layers/apz/public/GeckoContentControllerTypes.h +++ b/gfx/layers/apz/public/GeckoContentControllerTypes.h @@ -24,7 +24,7 @@ MOZ_DEFINE_ENUM_CLASS(GeckoContentController_APZStateChange, ( eTransformEnd, /** * APZ started a touch. - * |aArg| is 1 if touch can be a pan, 0 otherwise. + * |aArg| is 1 if touch can be a pan or zoom, 0 otherwise. */ eStartTouch, /** @@ -33,7 +33,7 @@ MOZ_DEFINE_ENUM_CLASS(GeckoContentController_APZStateChange, ( eStartPanning, /** * APZ finished processing a touch. 
- * |aArg| is 1 if touch was a click, 0 otherwise. + * |aArg| is a `apz::SingleTapState` defined in APZUtils.h. */ eEndTouch )); diff --git a/gfx/layers/apz/src/APZCTreeManager.cpp b/gfx/layers/apz/src/APZCTreeManager.cpp index ef3cde3596..bc0b6a8dc6 100644 --- a/gfx/layers/apz/src/APZCTreeManager.cpp +++ b/gfx/layers/apz/src/APZCTreeManager.cpp @@ -145,8 +145,7 @@ struct APZCTreeManager::TreeBuildingState { // root node of the layers (sub-)tree, which may not be same as the RCD node // for the subtree, and so we need this mechanism to ensure it gets propagated // to the RCD's APZC instance. Once it is set on the APZC instance, the value - // is cleared back to Nothing(). Note that this is only used in the WebRender - // codepath. + // is cleared back to Nothing(). Maybe<uint64_t> mZoomAnimationId; // See corresponding members of APZCTreeManager. These are the same thing, but @@ -538,12 +537,9 @@ APZCTreeManager::UpdateHitTestingTree(const WebRenderScrollDataWrapper& aRoot, AsyncPanZoomController* apzc = node->GetApzc(); aLayerMetrics.SetApzc(apzc); - // GetScrollbarAnimationId is only set when webrender is enabled, - // which limits the extra thumb mapping work to the webrender-enabled - // case where it is needed. - // Note also that when webrender is enabled, a "valid" animation id - // is always nonzero, so we don't need to worry about handling the - // case where WR is enabled and the animation id is zero. + // Note that a "valid" animation id is always nonzero, so we don't + // need to worry about handling the case where the animation id is + // zero. if (node->GetScrollbarAnimationId()) { if (node->IsScrollThumbNode()) { state.mScrollThumbs.push_back(node); @@ -555,11 +551,9 @@ APZCTreeManager::UpdateHitTestingTree(const WebRenderScrollDataWrapper& aRoot, } } - // GetFixedPositionAnimationId is only set when webrender is enabled. if (node->GetFixedPositionAnimationId().isSome()) { state.mFixedPositionInfo.emplace_back(node); } - // GetStickyPositionAnimationId is only set when webrender is enabled. if (node->GetStickyPositionAnimationId().isSome()) { state.mStickyPositionInfo.emplace_back(node); } @@ -2290,8 +2284,7 @@ void APZCTreeManager::SetupScrollbarDrag( // Under some conditions, we can confirm the drag block right away. // Otherwise, we have to wait for a main-thread confirmation. 
- if (StaticPrefs::apz_drag_initial_enabled() && - // check that the scrollbar's target scroll frame is layerized + if (/* check that the scrollbar's target scroll frame is layerized */ aScrollThumbNode->GetScrollTargetId() == aApzc->GetGuid().mScrollId && !aApzc->IsScrollInfoLayer()) { uint64_t dragBlockId = dragBlock->GetBlockId(); @@ -3538,12 +3531,6 @@ already_AddRefed<AsyncPanZoomController> APZCTreeManager::CommonAncestor( } bool APZCTreeManager::IsFixedToRootContent( - const HitTestingTreeNode* aNode) const { - MutexAutoLock lock(mMapLock); - return IsFixedToRootContent(FixedPositionInfo(aNode), lock); -} - -bool APZCTreeManager::IsFixedToRootContent( const FixedPositionInfo& aFixedInfo, const MutexAutoLock& aProofOfMapLock) const { ScrollableLayerGuid::ViewID fixedTarget = aFixedInfo.mFixedPosTarget; diff --git a/gfx/layers/apz/src/APZCTreeManager.h b/gfx/layers/apz/src/APZCTreeManager.h index 71d35fd5a6..939e7572a4 100644 --- a/gfx/layers/apz/src/APZCTreeManager.h +++ b/gfx/layers/apz/src/APZCTreeManager.h @@ -199,12 +199,10 @@ class APZCTreeManager : public IAPZCTreeManager, public APZInputBridge { LayersId aOriginatingLayersId, uint32_t aPaintSequenceNumber); /** - * Called when webrender is enabled, from the sampler thread. This function - * populates the provided transaction with any async scroll offsets needed. - * It also advances APZ animations to the specified sample time, and requests - * another composite if there are still active animations. - * In effect it is the webrender equivalent of (part of) the code in - * AsyncCompositionManager. + * Called from the sampler thread. This function populates the provided + * transaction with any async scroll offsets needed. It also advances APZ + * animations to the specified sample time, and requests another composite if + * there are still active animations. */ void SampleForWebRender(const Maybe<VsyncId>& aVsyncId, wr::TransactionWrapper& aTxn, @@ -509,7 +507,7 @@ class APZCTreeManager : public IAPZCTreeManager, public APZInputBridge { void AssertOnUpdaterThread(); // Returns a pointer to the WebRenderAPI this APZCTreeManager is for. - // This might be null (for example, if WebRender is not enabled). + // This might be null (for example, during GTests). already_AddRefed<wr::WebRenderAPI> GetWebRenderAPI() const; protected: @@ -679,12 +677,8 @@ class APZCTreeManager : public IAPZCTreeManager, public APZInputBridge { struct FixedPositionInfo; struct StickyPositionInfo; - // Returns true if |aNode| is a fixed layer that is fixed to the root content - // APZC. - // The map lock is required within these functions; if the map lock is already - // being held by the caller, the second overload should be used. If the map - // lock is not being held at the call site, the first overload should be used. - bool IsFixedToRootContent(const HitTestingTreeNode* aNode) const; + // Returns true if |aFixedInfo| represents a layer that is fixed to the root + // content APZC. bool IsFixedToRootContent(const FixedPositionInfo& aFixedInfo, const MutexAutoLock& aProofOfMapLock) const; @@ -919,15 +913,14 @@ class APZCTreeManager : public IAPZCTreeManager, public APZInputBridge { } }; /** - * If this APZCTreeManager is being used with WebRender, this vector gets - * populated during a layers update. It holds a package of information needed - * to compute and set the async transforms on scroll thumbs. 
This information - * is extracted from the HitTestingTreeNodes for the WebRender case because - * accessing the HitTestingTreeNodes requires holding the tree lock which - * we cannot do on the WR sampler thread. mScrollThumbInfo, however, can + * This vector gets populated during a layers update. It holds a package of + * information needed to compute and set the async transforms on scroll + * thumbs. This information is extracted from the HitTestingTreeNodes because + * accessing the HitTestingTreeNodes requires holding the tree lock which we + * cannot do on the WebRender sampler thread. mScrollThumbInfo, however, can * be accessed while just holding the mMapLock which is safe to do on the - * sampler thread. - * mMapLock must be acquired while accessing or modifying mScrollThumbInfo. + * sampler thread. mMapLock must be acquired while accessing or modifying + * mScrollThumbInfo. */ std::vector<ScrollThumbInfo> mScrollThumbInfo; @@ -945,12 +938,11 @@ class APZCTreeManager : public IAPZCTreeManager, public APZInputBridge { mScrollDirection(aScrollDirection) {} }; /** - * If this APZCTreeManager is being used with WebRender, this vector gets - * populated during a layers update. It holds a package of information needed - * to compute and set the async transforms on root scrollbars. This - * information is extracted from the HitTestingTreeNodes for the WebRender - * case because accessing the HitTestingTreeNodes requires holding the tree - * lock which we cannot do on the WR sampler thread. mRootScrollbarInfo, + * This vector gets populated during a layers update. It holds a package of + * information needed to compute and set the async transforms on root + * scrollbars. This information is extracted from the HitTestingTreeNodes + * because accessing the HitTestingTreeNodes requires holding the tree lock + * which we cannot do on the WebRender sampler thread. mRootScrollbarInfo, * however, can be accessed while just holding the mMapLock which is safe to * do on the sampler thread. * mMapLock must be acquired while accessing or modifying mRootScrollbarInfo. @@ -970,15 +962,14 @@ class APZCTreeManager : public IAPZCTreeManager, public APZInputBridge { explicit FixedPositionInfo(const HitTestingTreeNode* aNode); }; /** - * If this APZCTreeManager is being used with WebRender, this vector gets - * populated during a layers update. It holds a package of information needed - * to compute and set the async transforms on fixed position content. This - * information is extracted from the HitTestingTreeNodes for the WebRender - * case because accessing the HitTestingTreeNodes requires holding the tree - * lock which we cannot do on the WR sampler thread. mFixedPositionInfo, - * however, can be accessed while just holding the mMapLock which is safe to - * do on the sampler thread. mMapLock must be acquired while accessing or - * modifying mFixedPositionInfo. + * This vector gets populated during a layers update. It holds a package of + * information needed to compute and set the async transforms on fixed + * position content. This information is extracted from the + * HitTestingTreeNodes because accessing the HitTestingTreeNodes requires + * holding the tree lock which we cannot do on the WebRender sampler thread. + * mFixedPositionInfo, however, can be accessed while just holding the + * mMapLock which is safe to do on the sampler thread. mMapLock must be + * acquired while accessing or modifying mFixedPositionInfo. 
*/ std::vector<FixedPositionInfo> mFixedPositionInfo; @@ -997,15 +988,14 @@ class APZCTreeManager : public IAPZCTreeManager, public APZInputBridge { explicit StickyPositionInfo(const HitTestingTreeNode* aNode); }; /** - * If this APZCTreeManager is being used with WebRender, this vector gets - * populated during a layers update. It holds a package of information needed - * to compute and set the async transforms on sticky position content. This - * information is extracted from the HitTestingTreeNodes for the WebRender - * case because accessing the HitTestingTreeNodes requires holding the tree - * lock which we cannot do on the WR sampler thread. mStickyPositionInfo, - * however, can be accessed while just holding the mMapLock which is safe to - * do on the sampler thread. mMapLock must be acquired while accessing or - * modifying mStickyPositionInfo. + * This vector gets populated during a layers update. It holds a package of + * information needed to compute and set the async transforms on sticky + * position content. This information is extracted from the + * HitTestingTreeNodes because accessing the HitTestingTreeNodes requires + * holding the tree lock which we cannot do on the WebRender sampler thread. + * mStickyPositionInfo, however, can be accessed while just holding the + * mMapLock which is safe to do on the sampler thread. mMapLock must be + * acquired while accessing or modifying mStickyPositionInfo. */ std::vector<StickyPositionInfo> mStickyPositionInfo; diff --git a/gfx/layers/apz/src/APZSampler.cpp b/gfx/layers/apz/src/APZSampler.cpp index d0e251cec4..e14535da0d 100644 --- a/gfx/layers/apz/src/APZSampler.cpp +++ b/gfx/layers/apz/src/APZSampler.cpp @@ -125,19 +125,16 @@ AsyncTransform APZSampler::GetCurrentAsyncTransform( ParentLayerRect APZSampler::GetCompositionBounds( const LayersId& aLayersId, const ScrollableLayerGuid::ViewID& aScrollId, const MutexAutoLock& aProofOfMapLock) const { - // This function can get called on the compositor in case of non WebRender - // get called on the sampler thread in case of WebRender. AssertOnSamplerThread(); RefPtr<AsyncPanZoomController> apzc = mApz->GetTargetAPZC(aLayersId, aScrollId, aProofOfMapLock); if (!apzc) { - // On WebRender it's possible that this function can get called even after - // the target APZC has been already destroyed because destroying the - // animation which triggers this function call is basically processed later - // than the APZC one, i.e. queue mCompositorAnimationsToDelete in - // WebRenderBridgeParent and then remove them in - // WebRenderBridgeParent::RemoveEpochDataPriorTo. + // It's possible that this function can get called even after the target + // APZC has been already destroyed because destroying the animation which + // triggers this function call is basically processed later than the APZC + // one, i.e. queue mCompositorAnimationsToDelete in WebRenderBridgeParent + // and then remove them in WebRenderBridgeParent::RemoveEpochDataPriorTo. return ParentLayerRect(); } diff --git a/gfx/layers/apz/src/APZUtils.h b/gfx/layers/apz/src/APZUtils.h index 6614b6eeae..f36a01f65d 100644 --- a/gfx/layers/apz/src/APZUtils.h +++ b/gfx/layers/apz/src/APZUtils.h @@ -233,6 +233,14 @@ bool AboutToCheckerboard(const FrameMetrics& aPaintedMetrics, */ SideBits GetOverscrollSideBits(const ParentLayerPoint& aOverscrollAmount); +// Represents tri-state when a touch-end event received. 
+enum class SingleTapState : uint8_t { + NotClick, // The touch-block doesn't trigger a click event + WasClick, // The touch-block did trigger a click event + NotYetDetermined, // It's not yet determined whether the touch-block trigger + // a click event or not since double-tapping might happen +}; + } // namespace apz } // namespace layers diff --git a/gfx/layers/apz/src/AsyncPanZoomController.cpp b/gfx/layers/apz/src/AsyncPanZoomController.cpp index edbd2ecffa..a070340421 100644 --- a/gfx/layers/apz/src/AsyncPanZoomController.cpp +++ b/gfx/layers/apz/src/AsyncPanZoomController.cpp @@ -242,11 +242,6 @@ typedef PlatformSpecificStateBase * Setting this pref to true will cause APZ to handle mouse-dragging of * scrollbar thumbs. * - * \li\b apz.drag.initial.enabled - * Setting this pref to true will cause APZ to try to handle mouse-dragging - * of scrollbar thumbs without an initial round-trip to content to start it - * if possible. Only has an effect if apz.drag.enabled is also true. - * * \li\b apz.drag.touch.enabled * Setting this pref to true will cause APZ to handle touch-dragging of * scrollbar thumbs. Only has an effect if apz.drag.enabled is also true. @@ -1310,10 +1305,13 @@ nsEventStatus AsyncPanZoomController::OnTouchStart( if (RefPtr<GeckoContentController> controller = GetGeckoContentController()) { MOZ_ASSERT(GetCurrentTouchBlock()); - controller->NotifyAPZStateChange( - GetGuid(), APZStateChange::eStartTouch, + const bool canBePanOrZoom = GetCurrentTouchBlock()->GetOverscrollHandoffChain()->CanBePanned( - this), + this) || + (ZoomConstraintsAllowDoubleTapZoom() && + GetCurrentTouchBlock()->TouchActionAllowsDoubleTapZoom()); + controller->NotifyAPZStateChange( + GetGuid(), APZStateChange::eStartTouch, canBePanOrZoom, Some(GetCurrentTouchBlock()->GetBlockId())); } mLastTouch.mTimeStamp = mTouchStartTime = aEvent.mTimeStamp; @@ -3112,7 +3110,7 @@ nsEventStatus AsyncPanZoomController::GenerateSingleTap( // touch block caused a `click` event or not, thus for long-tap events, // it's not necessary. if (aType != TapType::eLongTapUp) { - touch->SetSingleTapOccurred(); + touch->SetSingleTapState(apz::SingleTapState::WasClick); } } // Because this may be being running as part of @@ -3143,7 +3141,7 @@ void AsyncPanZoomController::OnTouchEndOrCancel() { MOZ_ASSERT(GetCurrentTouchBlock()); controller->NotifyAPZStateChange( GetGuid(), APZStateChange::eEndTouch, - GetCurrentTouchBlock()->SingleTapOccurred(), + static_cast<int>(GetCurrentTouchBlock()->SingleTapState()), Some(GetCurrentTouchBlock()->GetBlockId())); } } @@ -3160,6 +3158,21 @@ nsEventStatus AsyncPanZoomController::OnSingleTapUp( return GenerateSingleTap(TapType::eSingleTap, aEvent.mPoint, aEvent.modifiers); } + + // Ignore the event if it does not have valid local coordinates. + // GenerateSingleTap will not send a tap in this case. + if (!ConvertToGecko(aEvent.mPoint)) { + return nsEventStatus_eIgnore; + } + + // Here we need to wait for the call to OnSingleTapConfirmed, we need to tell + // it to ActiveElementManager so that we can do element activation once + // ActiveElementManager got a single tap event later. 
+ if (TouchBlockState* touch = GetCurrentTouchBlock()) { + if (!touch->IsDuringFastFling()) { + touch->SetSingleTapState(apz::SingleTapState::NotYetDetermined); + } + } return nsEventStatus_eIgnore; } @@ -5304,10 +5317,10 @@ void AsyncPanZoomController::UpdateCheckerboardEvent( const MutexAutoLock& aProofOfLock, uint32_t aMagnitude) { if (mCheckerboardEvent && mCheckerboardEvent->RecordFrameInfo(aMagnitude)) { // This checkerboard event is done. Report some metrics to telemetry. - mozilla::glean::gfx_checkerboard::severity.AccumulateSamples( - {mCheckerboardEvent->GetSeverity()}); - mozilla::glean::gfx_checkerboard::peak_pixel_count.AccumulateSamples( - {mCheckerboardEvent->GetPeak()}); + mozilla::glean::gfx_checkerboard::severity.AccumulateSingleSample( + mCheckerboardEvent->GetSeverity()); + mozilla::glean::gfx_checkerboard::peak_pixel_count.AccumulateSingleSample( + mCheckerboardEvent->GetPeak()); mozilla::glean::gfx_checkerboard::duration.AccumulateRawDuration( mCheckerboardEvent->GetDuration()); diff --git a/gfx/layers/apz/src/AsyncPanZoomController.h b/gfx/layers/apz/src/AsyncPanZoomController.h index d0c4537a66..5db831d1bf 100644 --- a/gfx/layers/apz/src/AsyncPanZoomController.h +++ b/gfx/layers/apz/src/AsyncPanZoomController.h @@ -1133,13 +1133,12 @@ class AsyncPanZoomController { UniquePtr<OverscrollEffectBase> mOverscrollEffect; - // Zoom animation id, used for zooming in WebRender. This should only be - // set on the APZC instance for the root content document (i.e. the one we - // support zooming on), and is only used if WebRender is enabled. The - // animation id itself refers to the transform animation id that was set on - // the stacking context in the WR display list. By changing the transform - // associated with this id, we can adjust the scaling that WebRender applies, - // thereby controlling the zoom. + // Zoom animation id, used for zooming. This should only be set on the APZC + // instance for the root content document (i.e. the one we support zooming + // on). The animation id itself refers to the transform animation id that was + // set on the stacking context in the WR display list. By changing the + // transform associated with this id, we can adjust the scaling that WebRender + // applies, thereby controlling the zoom. Maybe<uint64_t> mZoomAnimationId; // Position on screen where user first put their finger down. diff --git a/gfx/layers/apz/src/HitTestingTreeNode.h b/gfx/layers/apz/src/HitTestingTreeNode.h index a4958b1af5..b76f317825 100644 --- a/gfx/layers/apz/src/HitTestingTreeNode.h +++ b/gfx/layers/apz/src/HitTestingTreeNode.h @@ -182,16 +182,16 @@ class HitTestingTreeNode { LayersId mLayersId; - // This is only set if WebRender is enabled, and only for HTTNs - // where IsScrollThumbNode() returns true. It holds the animation id that we - // use to move the thumb node to reflect async scrolling. + // This is only set for HTTNs where IsScrollThumbNode() returns true. It holds + // the animation id that we use to move the thumb node to reflect async + // scrolling. Maybe<uint64_t> mScrollbarAnimationId; // This is set for scrollbar Container and Thumb layers. ScrollbarData mScrollbarData; - // This is only set if WebRender is enabled. It holds the animation id that - // we use to adjust fixed position content for the toolbar. + // This holds the animation id that we use to adjust fixed position content + // for the toolbar. 
Maybe<uint64_t> mFixedPositionAnimationId; ScrollableLayerGuid::ViewID mFixedPosTarget; @@ -200,8 +200,8 @@ class HitTestingTreeNode { ScrollableLayerGuid::ViewID mStickyPosTarget; LayerRectAbsolute mStickyScrollRangeOuter; LayerRectAbsolute mStickyScrollRangeInner; - // This is only set if WebRender is enabled. It holds the animation id that - // we use to adjust sticky position content for the toolbar. + // This holds the animation id that we use to adjust sticky position content + // for the toolbar. Maybe<uint64_t> mStickyPositionAnimationId; LayerIntRect mVisibleRect; diff --git a/gfx/layers/apz/src/InputBlockState.cpp b/gfx/layers/apz/src/InputBlockState.cpp index 367fbf9e90..785df56c10 100644 --- a/gfx/layers/apz/src/InputBlockState.cpp +++ b/gfx/layers/apz/src/InputBlockState.cpp @@ -16,6 +16,7 @@ #include "mozilla/StaticPrefs_test.h" #include "mozilla/Telemetry.h" // for Telemetry #include "mozilla/ToString.h" +#include "mozilla/layers/APZEventState.h" #include "mozilla/layers/IAPZCTreeManager.h" // for AllowedTouchBehavior #include "OverscrollHandoffState.h" #include "QueuedInput.h" @@ -636,12 +637,12 @@ TouchBlockState::TouchBlockState( : CancelableBlockState(aTargetApzc, aFlags), mAllowedTouchBehaviorSet(false), mDuringFastFling(false), - mSingleTapOccurred(false), mInSlop(false), mForLongTap(false), mLongTapWasProcessed(false), mIsWaitingLongTapResult(false), mNeedsWaitTouchMove(false), + mSingleTapState(apz::SingleTapState::NotClick), mTouchCounter(aCounter), mStartTime(GetTargetApzc()->GetFrameTime().Time()) { mOriginalTargetConfirmedState = mTargetConfirmed; @@ -700,13 +701,12 @@ void TouchBlockState::SetDuringFastFling() { bool TouchBlockState::IsDuringFastFling() const { return mDuringFastFling; } -void TouchBlockState::SetSingleTapOccurred() { - TBS_LOG("%p setting single-tap-occurred flag\n", this); - mSingleTapOccurred = true; +void TouchBlockState::SetSingleTapState(apz::SingleTapState aState) { + TBS_LOG("%p setting single-tap-state: %d\n", this, + static_cast<uint8_t>(aState)); + mSingleTapState = aState; } -bool TouchBlockState::SingleTapOccurred() const { return mSingleTapOccurred; } - bool TouchBlockState::MustStayActive() { // If this touch block is for long-tap, it doesn't need to be active after the // block was processed, it will be taken over by the original touch block diff --git a/gfx/layers/apz/src/InputBlockState.h b/gfx/layers/apz/src/InputBlockState.h index f20a4a5901..d65b1cb57b 100644 --- a/gfx/layers/apz/src/InputBlockState.h +++ b/gfx/layers/apz/src/InputBlockState.h @@ -454,14 +454,12 @@ class TouchBlockState : public CancelableBlockState { */ bool IsDuringFastFling() const; /** - * Set the single-tap-occurred flag that indicates that this touch block - * triggered a single tap event. + * Set the single-tap state flag that indicates that this touch block + * triggered (1) a click, (2) not a click, or (3) not yet sure it will trigger + * a click or not. */ - void SetSingleTapOccurred(); - /** - * @return true iff the single-tap-occurred flag is set on this block. 
- */ - bool SingleTapOccurred() const; + void SetSingleTapState(apz::SingleTapState aState); + apz::SingleTapState SingleTapState() const { return mSingleTapState; } /** * @return false iff touch-action is enabled and the allowed touch behaviors @@ -537,7 +535,6 @@ class TouchBlockState : public CancelableBlockState { nsTArray<TouchBehaviorFlags> mAllowedTouchBehaviors; bool mAllowedTouchBehaviorSet; bool mDuringFastFling; - bool mSingleTapOccurred; bool mInSlop; // A long tap involves two touch blocks: the original touch // block containing the `touchstart`, and a second one @@ -557,6 +554,7 @@ class TouchBlockState : public CancelableBlockState { // content response for a touch move event. It will be set just before // triggering a long-press event. bool mNeedsWaitTouchMove; + apz::SingleTapState mSingleTapState; ScreenIntPoint mSlopOrigin; // A reference to the InputQueue's touch counter TouchCounter& mTouchCounter; diff --git a/gfx/layers/apz/src/SampledAPZCState.cpp b/gfx/layers/apz/src/SampledAPZCState.cpp index 712a46a3b1..8cdd905aba 100644 --- a/gfx/layers/apz/src/SampledAPZCState.cpp +++ b/gfx/layers/apz/src/SampledAPZCState.cpp @@ -86,6 +86,8 @@ void SampledAPZCState::RemoveFractionalAsyncDelta() { // a snapshot of APZ state (decoupling it from APZ assumptions) and provides // it as an input to the compositor (so all compositor state should be // internally consistent based on this input). + // TODO(bug 1889267): Now that we use WebRender everywhere, can this hack be + // removed? if (mLayoutViewport.TopLeft() == mVisualScrollOffset) { return; } diff --git a/gfx/layers/apz/test/mochitest/helper_bug1669625.html b/gfx/layers/apz/test/mochitest/helper_bug1669625.html index 95d2a4bc2c..4a91c7f0b6 100644 --- a/gfx/layers/apz/test/mochitest/helper_bug1669625.html +++ b/gfx/layers/apz/test/mochitest/helper_bug1669625.html @@ -11,8 +11,7 @@ <script type="application/javascript"> async function test() { - if (SpecialPowers.getBoolPref("apz.force_disable_desktop_zooming_scrollbars") || - getPlatform() == "android") { + if (getPlatform() == "android") { return; } diff --git a/gfx/layers/apz/test/mochitest/helper_bug1806400-2.html b/gfx/layers/apz/test/mochitest/helper_bug1806400-2.html new file mode 100644 index 0000000000..8d2eeca58d --- /dev/null +++ b/gfx/layers/apz/test/mochitest/helper_bug1806400-2.html @@ -0,0 +1,58 @@ +<!DOCTYPE html> +<html> +<meta name="viewport" content="width=device-width; initial-scale=0.4"> +<title>Tests that double-tap-to-zoom never activates elements inside a scrollable container</title> +<script src="/tests/SimpleTest/SimpleTest.js"></script> + <script src="/tests/SimpleTest/paint_listener.js"></script> +<script src="apz_test_utils.js"></script> +<script src="apz_test_native_event_utils.js"></script> +<style> +#scrollable { + height: 50vh; + width: 50vw; + background: yellow; + overflow: scroll; +} + +#scrollabletarget { + height: 200vh; + width: 200vh; + background: green; +} + +#scrollabletarget:active { + background: red; +} + +</style> +<div id="scrollable"> + <div id="scrollabletarget"> + </div> +</div> +<script> +async function test() { + ok(!scrollabletarget.matches(":active"), "should not be active initially"); + + let rAFID = requestAnimationFrame(function ensureInactive() { + let isActive = scrollabletarget.matches(":active"); + ok(!isActive, "Element activation should never happen!"); + if (!isActive) { + rAFID = requestAnimationFrame(ensureInactive); + } + }); + + await doubleTapOn(scrollabletarget, 50, 50, false /* useTouchpad */); + + 
cancelAnimationFrame(rAFID); +} + +if (getPlatform() != "mac" && getPlatform() != "android") { + ok(true, "Skipping test because double-tap-zoom isn't allowed on " + getPlatform()); + subtestDone(); +} else { + waitUntilApzStable() + .then(test) + .then(subtestDone, subtestFailed); +} +</script> +</html> diff --git a/gfx/layers/apz/test/mochitest/helper_bug1806400-3.html b/gfx/layers/apz/test/mochitest/helper_bug1806400-3.html new file mode 100644 index 0000000000..2e5398119c --- /dev/null +++ b/gfx/layers/apz/test/mochitest/helper_bug1806400-3.html @@ -0,0 +1,49 @@ +<!DOCTYPE html> +<html> +<meta name="viewport" content="width=device-width; initial-scale=1.0"> +<title>Tests that :active state is changed on a scrollable container without any touch event listeners</title> +<script src="/tests/SimpleTest/SimpleTest.js"></script> + <script src="/tests/SimpleTest/paint_listener.js"></script> +<script src="apz_test_utils.js"></script> +<script src="apz_test_native_event_utils.js"></script> +<style> +#scrollable { + height: 50vh; + width: 50vw; + background: yellow; + overflow: scroll; +} + +#scrollabletarget { + height: 200vh; + width: 200vh; + background: green; +} + +#scrollabletarget:active { + background: red; +} + +</style> +<div id="scrollable"> + <div id="scrollabletarget"> + </div> +</div> +<script> +async function test() { + ok(!scrollabletarget.matches(":active"), "should not be active initially"); + + await synthesizeNativeTap(scrollabletarget, 50, 50); + + // In JS there's no way to ensure `APZStateChange::eStartTouch` notification + // has been processed. So we wait for `:active` state change here. + await SimpleTest.promiseWaitForCondition( + () => scrollabletarget.matches(":active"), + "Waiting for :active state change"); + ok(scrollabletarget.matches(":active"), "should be active"); +} +waitUntilApzStable() +.then(test) +.then(subtestDone, subtestFailed); +</script> +</html> diff --git a/gfx/layers/apz/test/mochitest/helper_bug1806400-4.html b/gfx/layers/apz/test/mochitest/helper_bug1806400-4.html new file mode 100644 index 0000000000..46fa95b651 --- /dev/null +++ b/gfx/layers/apz/test/mochitest/helper_bug1806400-4.html @@ -0,0 +1,49 @@ +<!DOCTYPE html> +<html> +<meta name="viewport" content="width=device-width; initial-scale=0.4"> +<title>Tests that double-tap-to-zoom never activates elements inside non scrollable container</title> +<script src="/tests/SimpleTest/SimpleTest.js"></script> + <script src="/tests/SimpleTest/paint_listener.js"></script> +<script src="apz_test_utils.js"></script> +<script src="apz_test_native_event_utils.js"></script> +<style> +#nonscrollabletarget { + height: 300px; + width: 300px; + background: green; +} + +#nonscrollabletarget:active { + background: red; +} + +</style> +<div id="nonscrollabletarget"> +</div> +<script> +async function test() { + ok(!nonscrollabletarget.matches(":active"), "should not be active initially"); + + let rAFID = requestAnimationFrame(function ensureInactive() { + let isActive = nonscrollabletarget.matches(":active"); + ok(!isActive, "Element activation should never happen!"); + if (!isActive) { + rAFID = requestAnimationFrame(ensureInactive); + } + }); + + await doubleTapOn(nonscrollabletarget, 50, 50, false /* useTouchpad */); + + cancelAnimationFrame(rAFID); +} + +if (getPlatform() != "mac" && getPlatform() != "android") { + ok(true, "Skipping test because double-tap-zoom isn't allowed on " + getPlatform()); + subtestDone(); +} else { + waitUntilApzStable() + .then(test) + .then(subtestDone, subtestFailed); +} 
+</script> +</html> diff --git a/gfx/layers/apz/test/mochitest/helper_bug1806400.html b/gfx/layers/apz/test/mochitest/helper_bug1806400.html new file mode 100644 index 0000000000..03be0c8535 --- /dev/null +++ b/gfx/layers/apz/test/mochitest/helper_bug1806400.html @@ -0,0 +1,54 @@ +<!DOCTYPE html> +<html> +<meta name="viewport" content="width=device-width; initial-scale=1.0"> +<title>Tests that :active state is changed with `touchstart` event listener</title> +<script src="/tests/SimpleTest/SimpleTest.js"></script> + <script src="/tests/SimpleTest/paint_listener.js"></script> +<script src="apz_test_utils.js"></script> +<script src="apz_test_native_event_utils.js"></script> +<style> + #button { + width: 100px; + height: 100px; + } +</style> +<button id="button">Button</button> +<script> +async function test() { + // Set up an active touchstart event listner. + let eventPromise = promiseOneEvent(document.documentElement, "touchstart"); + await promiseApzFlushedRepaints(); + + await synthesizeNativeTouch(button, 10, 10, SpecialPowers.DOMWindowUtils.TOUCH_CONTACT); + await eventPromise; + + // In JS there's no way to ensure `APZStateChange::eStartTouch` notification + // has been processed. So we wait for `:active` state change here. + await SimpleTest.promiseWaitForCondition( + () => button.matches(":active"), + "Waiting for :active state change"); + ok(button.matches(":active"), "should be active"); + + eventPromise = promiseOneEvent(button, "touchend"); + await synthesizeNativeTouch(button, 10, 10, SpecialPowers.DOMWindowUtils.TOUCH_REMOVE); + await eventPromise; + + // Same as above. We need to wait for not `:active` state here. + await SimpleTest.promiseWaitForCondition( + () => !button.matches(":active"), + "Waiting for :active state change"); + ok(!button.matches(":active"), "should not be active"); +} + +if (getPlatform() == "windows") { + // Bug 1875916. On Windows synthesizeNativeTouch(TOUCH_REMOVE) causes + // `InjectTouchInput failure` with ERROR_TIMEOUT. 
+ ok(true, "Test doesn't need to run on Windows"); + subtestDone(); +} else { + waitUntilApzStable() + .then(test) + .then(subtestDone, subtestFailed); +} +</script> +</html> diff --git a/gfx/layers/apz/test/mochitest/helper_hittest_iframe_perspective-4.html b/gfx/layers/apz/test/mochitest/helper_hittest_iframe_perspective-4.html new file mode 100644 index 0000000000..96ea0d3b09 --- /dev/null +++ b/gfx/layers/apz/test/mochitest/helper_hittest_iframe_perspective-4.html @@ -0,0 +1,86 @@ +<!DOCTYPE html> +<html> +<!-- +https://bugzilla.mozilla.org/show_bug.cgi?id=1888904 +--> +<head> + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width; initial-scale=1.0"> + <title>Test that events are delivered with correct coordinates to an iframe inide a no-op perspective transform</title> + <script src="apz_test_native_event_utils.js"></script> + <script src="apz_test_utils.js"></script> + <script src="/tests/SimpleTest/paint_listener.js"></script> + <style> + html, body { + margin: 0; + padding: 0; + } + iframe { + border: 0; + background-color: blue; + } + .modal-dialog { + position: absolute; + top: 500px; + left: 500px; + transform: translate(-50%, -50%); + border: 1px solid black; + } + .item { + perspective: 1000px; + transform: translate3d(0, 0, 0); + } + .g-recaptcha { + transform-origin: 0 0; + transform: scale(0.91); + } + </style> +</head> +<body> + <div class="modal-dialog"> + <div class="item"> + <div class="g-recaptcha"> + <iframe id="iframe" src="https://example.com/tests/gfx/layers/apz/test/mochitest/helper_hittest_iframe_perspective_child.html"></iframe> + </div> + </div> + </div> + </div> + </div> + <script type="application/javascript"> +async function test() { + // Wait for iframe to receive content transforms. + await SpecialPowers.spawn(iframe, [], async () => { + await SpecialPowers.contentTransformsReceived(content.window); + }); + + let clickCoordsInChild = { + offsetX: 0, + offsetY: 0 + }; + let childMessagePromise = new Promise(resolve => { + window.addEventListener("message", event => { + let data = JSON.parse(event.data); + if ("type" in data && data.type == "got-mouse-down") { + clickCoordsInChild = data.coords; + resolve(); + } + }) + }); + await synthesizeNativeMouseEventWithAPZ({ + type: "click", + target: iframe, + offsetX: 100, + offsetY: 100 + }); + await childMessagePromise; + is(clickCoordsInChild.offsetX, 110 /* 100 / 0.91 */, "x coordinate is correct"); + is(clickCoordsInChild.offsetY, 110 /* 100 / 0.91 */, "y coordinate is correct"); +} + +waitUntilApzStable() +.then(test) +.then(subtestDone, subtestFailed); + + </script> +</body> +</html> diff --git a/gfx/layers/apz/test/mochitest/helper_hittest_pointerevents_svg.html b/gfx/layers/apz/test/mochitest/helper_hittest_pointerevents_svg.html index 22b880736d..3b8a7cef3e 100644 --- a/gfx/layers/apz/test/mochitest/helper_hittest_pointerevents_svg.html +++ b/gfx/layers/apz/test/mochitest/helper_hittest_pointerevents_svg.html @@ -132,10 +132,10 @@ async function test() { `bottom left of scroller in testcase ${testId}`); } - // With the first two cases (circle masks) both WebRender and non-WebRender - // emit dispatch-to-content regions for the right side, so for now we just - // test for that. Eventually WebRender should be able to stop emitting DTC - // and we can update this test to be more precise in that case. + // With the first two cases (circle masks) WebRender emits dispatch-to-content + // regions for the right side, so for now we just test for that. 
+ // Eventually WebRender should be able to stop emitting DTC + // and we can update this test to be more precise. // For the two rectangular test cases we get precise results rather than // dispatch-to-content. if (testId == 1 || testId == 2) { diff --git a/gfx/layers/apz/test/mochitest/helper_scrollframe_activation_on_load.html b/gfx/layers/apz/test/mochitest/helper_scrollframe_activation_on_load.html index 1947a89a8f..c3f02d23d9 100644 --- a/gfx/layers/apz/test/mochitest/helper_scrollframe_activation_on_load.html +++ b/gfx/layers/apz/test/mochitest/helper_scrollframe_activation_on_load.html @@ -41,9 +41,8 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1151663 let config = getHitTestConfig(); let heightMultiplier = SpecialPowers.getCharPref("apz.y_stationary_size_multiplier"); - // With WebRender, the effective height multiplier can be reduced - // for alignment reasons. The reduction should be no more than a - // factor of two. + // The effective height multiplier can be reduced for alignment reasons. + // The reduction should be no more than a factor of two. heightMultiplier /= 2; info("effective displayport height multipler is " + heightMultiplier); diff --git a/gfx/layers/apz/test/mochitest/helper_touch_synthesized_mouseevents.html b/gfx/layers/apz/test/mochitest/helper_touch_synthesized_mouseevents.html index 3930cec3c3..b3d7b4352a 100644 --- a/gfx/layers/apz/test/mochitest/helper_touch_synthesized_mouseevents.html +++ b/gfx/layers/apz/test/mochitest/helper_touch_synthesized_mouseevents.html @@ -76,6 +76,13 @@ async function test() { promiseOneEvent(targetElem, "click"), ]; + // Create a promise for :active state change since in the case where the + // target element is inside a scrollable container, APZ delays :active state + // change, it sometimes happens after all the relavant events above. + const activePromise = SimpleTest.promiseWaitForCondition( + () => targetElem.matches(":active"), + "Waiting for :active state change"); + // Perform a tap gesture await synthesizeNativeTap(targetElem, 50, 50); @@ -88,7 +95,7 @@ async function test() { // The value of ui.touch_activation.duration_ms should be set to // a large value. If we did not delay sending the synthesized // mouse events, this test will not timeout. - await Promise.all(mouseEventPromises); + await Promise.all([...mouseEventPromises, activePromise]); clearTimeout(failTimeout); diff --git a/gfx/layers/apz/test/mochitest/test_group_hittest-3.html b/gfx/layers/apz/test/mochitest/test_group_hittest-3.html index f5675ee790..eac0348b89 100644 --- a/gfx/layers/apz/test/mochitest/test_group_hittest-3.html +++ b/gfx/layers/apz/test/mochitest/test_group_hittest-3.html @@ -33,6 +33,7 @@ var prefs = [ var subtests = [ {"file": "helper_hittest_iframe_perspective.html", "prefs": prefs}, {"file": "helper_hittest_iframe_perspective-3.html", "prefs": prefs}, + {"file": "helper_hittest_iframe_perspective-4.html", "prefs": prefs}, ]; if (isApzEnabled()) { diff --git a/gfx/layers/apz/test/mochitest/test_group_touchevents-5.html b/gfx/layers/apz/test/mochitest/test_group_touchevents-5.html index 0eee77a3ae..e6e4eb40fb 100644 --- a/gfx/layers/apz/test/mochitest/test_group_touchevents-5.html +++ b/gfx/layers/apz/test/mochitest/test_group_touchevents-5.html @@ -9,6 +9,12 @@ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/> <script type="application/javascript"> +// Increase the tap timeouts so the double-tap is still detected in case of +// random delays during testing. 
+var doubletap_prefs = [ + ["ui.click_hold_context_menus.delay", 10000], + ["apz.max_tap_time", 10000], +]; var subtests = [ // tests that scrolling doesn't cause extra SchedulePaint calls @@ -23,6 +29,18 @@ var subtests = [ {"file": "helper_bug1719855.html?prevent=contextmenu"}, {"file": "helper_bug1719855.html"}, {"file": "helper_bug1724759.html"}, + {"file": "helper_bug1806400.html", "prefs": [ + // This test uses `SimpleTest.promiseWaitForCondition` which waits for the + // given condition up to 3s, to avoid opening context menu during the time + // span use a longer `ui.click_hold_context_menus.delay` here. + ["ui.click_hold_context_menus.delay", 10000], + ["ui.touch_activation.duration_ms", 1000] + ]}, + {"file": "helper_bug1806400-2.html", "prefs": doubletap_prefs}, + {"file": "helper_bug1806400-3.html", "prefs": [ + ["ui.touch_activation.duration_ms", 90000] + ]}, + {"file": "helper_bug1806400-4.html", "prefs": doubletap_prefs}, // Add new subtests here. If this starts timing out because it's taking too // long, create a test_group_touchevents-6.html file. Refer to 1423011#c57 // for more details. diff --git a/gfx/layers/apz/test/mochitest/test_layerization.html b/gfx/layers/apz/test/mochitest/test_layerization.html index 0ff76de317..e97971b456 100644 --- a/gfx/layers/apz/test/mochitest/test_layerization.html +++ b/gfx/layers/apz/test/mochitest/test_layerization.html @@ -64,9 +64,8 @@ let config = getHitTestConfig(); let activateAllScrollFrames = config.activateAllScrollFrames; let heightMultiplier = SpecialPowers.getCharPref("apz.y_stationary_size_multiplier"); -// With WebRender, the effective height multiplier can be reduced -// for alignment reasons. The reduction should be no more than a -// factor of two. +// The effective height multiplier can be reduced for alignment reasons. +// The reduction should be no more than a factor of two. heightMultiplier /= 2; info("effective displayport height multipler is " + heightMultiplier); diff --git a/gfx/layers/apz/test/mochitest/test_scroll_inactive_bug1190112.html b/gfx/layers/apz/test/mochitest/test_scroll_inactive_bug1190112.html index de54cf93fe..dd18e078b6 100644 --- a/gfx/layers/apz/test/mochitest/test_scroll_inactive_bug1190112.html +++ b/gfx/layers/apz/test/mochitest/test_scroll_inactive_bug1190112.html @@ -523,8 +523,7 @@ async function test() { // Scroll inner again // Tick the refresh driver once to make sure the compositor has sent the // updated scroll offset for the outer scroller to WebRender, so that the - // hit-test in sendWheelAndPaint takes it into account. (This isn't needed - // if using non-WR layers, but doesn't hurt either). + // hit-test in sendWheelAndPaint takes it into account. 
var dwu = SpecialPowers.getDOMWindowUtils(window); dwu.advanceTimeAndRefresh(16); dwu.restoreNormalRefresh(); diff --git a/gfx/layers/apz/util/APZEventState.cpp b/gfx/layers/apz/util/APZEventState.cpp index 5db6a08429..c205b09ca2 100644 --- a/gfx/layers/apz/util/APZEventState.cpp +++ b/gfx/layers/apz/util/APZEventState.cpp @@ -27,6 +27,7 @@ #include "mozilla/dom/BrowserChild.h" #include "mozilla/dom/MouseEventBinding.h" #include "mozilla/layers/APZCCallbackHelper.h" +#include "mozilla/layers/APZUtils.h" #include "mozilla/layers/IAPZCTreeManager.h" #include "mozilla/widget/nsAutoRollup.h" #include "nsCOMPtr.h" @@ -101,7 +102,7 @@ APZEventState::APZEventState(nsIWidget* aWidget, mContentReceivedInputBlockCallback(std::move(aCallback)), mPendingTouchPreventedResponse(false), mPendingTouchPreventedBlockId(0), - mEndTouchIsClick(false), + mEndTouchState(apz::SingleTapState::NotClick), mFirstTouchCancelled(false), mTouchEndCancelled(false), mReceivedNonTouchStart(false), @@ -349,11 +350,13 @@ void APZEventState::ProcessTouchEvent( case eTouchEnd: if (isTouchPrevented) { mTouchEndCancelled = true; - mEndTouchIsClick = false; + mEndTouchState = apz::SingleTapState::NotClick; } [[fallthrough]]; case eTouchCancel: - mActiveElementManager->HandleTouchEndEvent(mEndTouchIsClick); + if (mActiveElementManager->HandleTouchEndEvent(mEndTouchState)) { + mEndTouchState = apz::SingleTapState::NotClick; + } [[fallthrough]]; case eTouchMove: { if (!mReceivedNonTouchStart) { @@ -515,13 +518,13 @@ void APZEventState::ProcessAPZStateChange(ViewID aViewId, break; } case APZStateChange::eStartTouch: { - bool canBePan = aArg; - mActiveElementManager->HandleTouchStart(canBePan); + bool canBePanOrZoom = aArg; + mActiveElementManager->HandleTouchStart(canBePanOrZoom); // If this is a non-scrollable content, set a timer for the amount of // time specified by ui.touch_activation.duration_ms to clear the // active element state. 
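The mEndTouchIsClick to mEndTouchState change in this hunk switches APZEventState from a plain bool to the tri-state tap result that APZ now reports. A minimal sketch of the shape this enum is assumed to take (the real definition lives in mozilla/layers/APZUtils.h and may differ in detail); the eEndTouch case further down simply static_casts the notification payload back into it:

#include <cstdint>

// Sketch only, not the authoritative definition from APZUtils.h.
enum class SingleTapState : uint8_t {
  NotClick,          // the touch block did not turn out to be a tap
  WasClick,          // the touch block was confirmed as a single tap
  NotYetDetermined,  // a double tap may still follow; the answer arrives later
};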
- APZES_LOG("%s: can-be-pan=%d", __FUNCTION__, aArg); - if (!canBePan) { + APZES_LOG("%s: can-be-pan-or-zoom=%d", __FUNCTION__, aArg); + if (!canBePanOrZoom) { MOZ_ASSERT(aInputBlockId.isSome()); } break; @@ -532,8 +535,10 @@ void APZEventState::ProcessAPZStateChange(ViewID aViewId, break; } case APZStateChange::eEndTouch: { - mEndTouchIsClick = aArg; - mActiveElementManager->HandleTouchEnd(); + mEndTouchState = static_cast<apz::SingleTapState>(aArg); + if (mActiveElementManager->HandleTouchEnd(mEndTouchState)) { + mEndTouchState = apz::SingleTapState::NotClick; + } break; } } diff --git a/gfx/layers/apz/util/APZEventState.h b/gfx/layers/apz/util/APZEventState.h index 52febc0424..3a57e9e6c6 100644 --- a/gfx/layers/apz/util/APZEventState.h +++ b/gfx/layers/apz/util/APZEventState.h @@ -39,6 +39,10 @@ namespace layers { class ActiveElementManager; +namespace apz { +enum class SingleTapState : uint8_t; +} // namespace apz + typedef std::function<void(uint64_t /* input block id */, bool /* prevent default */)> ContentReceivedInputBlockCallback; @@ -106,7 +110,7 @@ class APZEventState final { bool mPendingTouchPreventedResponse; ScrollableLayerGuid mPendingTouchPreventedGuid; uint64_t mPendingTouchPreventedBlockId; - bool mEndTouchIsClick; + apz::SingleTapState mEndTouchState; bool mFirstTouchCancelled; bool mTouchEndCancelled; // Set to true when we have received any one of diff --git a/gfx/layers/apz/util/ActiveElementManager.cpp b/gfx/layers/apz/util/ActiveElementManager.cpp index f2d981e9f0..c92fec783a 100644 --- a/gfx/layers/apz/util/ActiveElementManager.cpp +++ b/gfx/layers/apz/util/ActiveElementManager.cpp @@ -10,6 +10,8 @@ #include "mozilla/StaticPrefs_ui.h" #include "mozilla/dom/Element.h" #include "mozilla/dom/Document.h" +#include "mozilla/layers/APZEventState.h" +#include "mozilla/layers/APZUtils.h" #include "nsITimer.h" static mozilla::LazyLogModule sApzAemLog("apz.activeelement"); @@ -21,7 +23,7 @@ namespace layers { class DelayedClearElementActivation final : public nsITimerCallback, public nsINamed { private: - explicit DelayedClearElementActivation(nsCOMPtr<dom::Element>& aTarget, + explicit DelayedClearElementActivation(RefPtr<dom::Element>& aTarget, const nsCOMPtr<nsITimer>& aTimer) : mTarget(aTarget) // Hold the reference count until we are called back. 
@@ -33,7 +35,7 @@ class DelayedClearElementActivation final : public nsITimerCallback, NS_DECL_ISUPPORTS static RefPtr<DelayedClearElementActivation> Create( - nsCOMPtr<dom::Element>& aTarget); + RefPtr<dom::Element>& aTarget); NS_IMETHOD Notify(nsITimer*) override; @@ -56,11 +58,12 @@ class DelayedClearElementActivation final : public nsITimerCallback, mTimer = nullptr; } } + dom::Element* GetTarget() const { return mTarget; } private: ~DelayedClearElementActivation() = default; - nsCOMPtr<dom::Element> mTarget; + RefPtr<dom::Element> mTarget; nsCOMPtr<nsITimer> mTimer; bool mProcessedSingleTap; }; @@ -77,7 +80,7 @@ static nsPresContext* GetPresContextFor(nsIContent* aContent) { } RefPtr<DelayedClearElementActivation> DelayedClearElementActivation::Create( - nsCOMPtr<dom::Element>& aTarget) { + RefPtr<dom::Element>& aTarget) { nsCOMPtr<nsITimer> timer = NS_NewTimer(); if (!timer) { return nullptr; @@ -137,7 +140,11 @@ void DelayedClearElementActivation::ClearGlobalActiveContent() { NS_IMPL_ISUPPORTS(DelayedClearElementActivation, nsITimerCallback, nsINamed) ActiveElementManager::ActiveElementManager() - : mCanBePan(false), mCanBePanSet(false), mSetActiveTask(nullptr) {} + : mCanBePanOrZoom(false), + mCanBePanOrZoomSet(false), + mSingleTapBeforeActivation(false), + mSingleTapState(apz::SingleTapState::NotClick), + mSetActiveTask(nullptr) {} ActiveElementManager::~ActiveElementManager() = default; @@ -156,9 +163,9 @@ void ActiveElementManager::SetTargetElement(dom::EventTarget* aTarget) { TriggerElementActivation(); } -void ActiveElementManager::HandleTouchStart(bool aCanBePan) { - AEM_LOG("Touch start, aCanBePan: %d\n", aCanBePan); - if (mCanBePanSet) { +void ActiveElementManager::HandleTouchStart(bool aCanBePanOrZoom) { + AEM_LOG("Touch start, aCanBePanOrZoom: %d\n", aCanBePanOrZoom); + if (mCanBePanOrZoomSet) { // Multiple fingers on screen (since HandleTouchEnd clears mCanBePanSet). AEM_LOG("Multiple fingers on-screen, clearing touch block state\n"); CancelTask(); @@ -167,16 +174,29 @@ void ActiveElementManager::HandleTouchStart(bool aCanBePan) { return; } - mCanBePan = aCanBePan; - mCanBePanSet = true; + mCanBePanOrZoom = aCanBePanOrZoom; + mCanBePanOrZoomSet = true; TriggerElementActivation(); } void ActiveElementManager::TriggerElementActivation() { + // Reset mSingleTapState here, whether we got here from HandleTouchStart() + // or from SetTargetElement(). + // NOTE: It's possible that ProcessSingleTap() gets called in between the + // HandleTouchStart() and SetTargetElement() calls, i.e. with + // mSingleTapBeforeActivation being true. In that case it doesn't matter + // that mSingleTapState was reset once, read in ProcessSingleTap(), and then + // reset here again, because `NotYetDetermined` is the only state that + // ProcessSingleTap() needs to care about, and it should NOT occur in that + // scenario. In other words, the case where `NotYetDetermined` matters is + // when ProcessSingleTap() gets called later than all the other events and + // notifications. + mSingleTapState = apz::SingleTapState::NotClick; + // Both HandleTouchStart() and SetTargetElement() call this. They can be - // called in either order. One will set mCanBePanSet, and the other, mTarget. - // We want to actually trigger the activation once both are set. - if (!(mTarget && mCanBePanSet)) { + // called in either order. One will set mCanBePanOrZoomSet, and the other, + // mTarget. We want to actually trigger the activation once both are set.
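The same idea of acting only once both parties have reported in is applied to the touch-end side further down: ActiveElementManager records the eTouchEnd event and the APZ end-touch notification in a mozilla::EnumSet and only then decides whether to change the :active state. A standalone sketch of that rendezvous check, assuming mozilla/EnumSet.h semantics (this is not the actual class, just the pattern MaybeChangeActiveState() relies on):

#include <cstdint>
#include "mozilla/EnumSet.h"

enum class TouchEndState : uint8_t {
  GotTouchEndNotification,  // APZStateChange::eEndTouch arrived
  GotTouchEndEvent,         // the eTouchEnd widget event arrived
};
using TouchEndStates = mozilla::EnumSet<TouchEndState>;

// True only when both signals have been recorded for the current touch block.
static bool BothTouchEndSignalsArrived(const TouchEndStates& aState) {
  return aState == TouchEndStates(TouchEndState::GotTouchEndEvent,
                                  TouchEndState::GotTouchEndNotification);
}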
+ if (!(mTarget && mCanBePanOrZoomSet)) { return; } @@ -190,10 +210,13 @@ void ActiveElementManager::TriggerElementActivation() { // If the touch cannot be a pan, make mTarget :active right away. // Otherwise, wait a bit to see if the user will pan or not. - if (!mCanBePan) { + if (!mCanBePanOrZoom) { SetActive(mTarget); if (mDelayedClearElementActivation) { + if (mSingleTapBeforeActivation) { + mDelayedClearElementActivation->MarkSingleTapProcessed(); + } mDelayedClearElementActivation->StartTimer(); } } else { @@ -210,6 +233,10 @@ void ActiveElementManager::TriggerElementActivation() { task.forget(), StaticPrefs::ui_touch_activation_delay_ms()); AEM_LOG("Scheduling mSetActiveTask %p\n", mSetActiveTask.get()); } + AEM_LOG( + "Got both touch-end event and end touch notification, clearing pan " + "state\n"); + mCanBePanOrZoomSet = false; } void ActiveElementManager::ClearActivation() { @@ -218,43 +245,70 @@ void ActiveElementManager::ClearActivation() { ResetActive(); } -void ActiveElementManager::HandleTouchEndEvent(bool aWasClick) { - AEM_LOG("Touch end event, aWasClick: %d\n", aWasClick); +bool ActiveElementManager::HandleTouchEndEvent(apz::SingleTapState aState) { + AEM_LOG("Touch end event, state: %hhu\n", static_cast<uint8_t>(aState)); + + mTouchEndState += TouchEndState::GotTouchEndEvent; + return MaybeChangeActiveState(aState); +} + +bool ActiveElementManager::HandleTouchEnd(apz::SingleTapState aState) { + AEM_LOG("Touch end\n"); + + mTouchEndState += TouchEndState::GotTouchEndNotification; + return MaybeChangeActiveState(aState); +} + +bool ActiveElementManager::MaybeChangeActiveState(apz::SingleTapState aState) { + if (mTouchEndState != + TouchEndStates(TouchEndState::GotTouchEndEvent, + TouchEndState::GotTouchEndNotification)) { + return false; + } - // If the touch was a click, make mTarget :active right away. - // nsEventStateManager will reset the active element when processing - // the mouse-down event generated by the click. CancelTask(); - if (aWasClick) { + + mSingleTapState = aState; + + if (aState == apz::SingleTapState::WasClick) { // Scrollbar thumbs use a different mechanism for their active // highlight (the "active" attribute), so don't set the active state // on them because nothing will clear it. - if (!(mTarget && mTarget->IsXULElement(nsGkAtoms::thumb))) { + if (mCanBePanOrZoom && + !(mTarget && mTarget->IsXULElement(nsGkAtoms::thumb))) { SetActive(mTarget); } } else { - // We might reach here if mCanBePan was false on touch-start and + // We might reach here if mCanBePanOrZoom was false on touch-start and // so we set the element active right away. Now it turns out the // action was not a click so we need to reset the active element. ResetActive(); } ResetTouchBlockState(); -} - -void ActiveElementManager::HandleTouchEnd() { - AEM_LOG("Touch end, clearing pan state\n"); - mCanBePanSet = false; + return true; } void ActiveElementManager::ProcessSingleTap() { if (!mDelayedClearElementActivation) { + // We have not received touch-start notification yet. We will have to run + // MarkSingleTapProcessed() when we receive the touch-start notification. + mSingleTapBeforeActivation = true; return; } + if (mSingleTapState == apz::SingleTapState::NotYetDetermined) { + // We got `NotYetDetermined`, meaning that at that moment we didn't know + // for sure whether a double tap would still be incoming. Now we are sure + // that no double tap will happen, so it's time to activate the target + // element.
+ if (auto* target = mDelayedClearElementActivation->GetTarget()) { + SetActive(target); + } + } mDelayedClearElementActivation->MarkSingleTapProcessed(); - if (mCanBePan) { + if (mCanBePanOrZoom) { // In the case that we have not started the delayed reset of the element // activation state, start the timer now. mDelayedClearElementActivation->StartTimer(); @@ -297,7 +351,14 @@ void ActiveElementManager::ResetActive() { void ActiveElementManager::ResetTouchBlockState() { mTarget = nullptr; - mCanBePanSet = false; + mCanBePanOrZoomSet = false; + mTouchEndState.clear(); + mSingleTapBeforeActivation = false; + // NOTE: Do not reset mSingleTapState here since it will be necessary in + // ProcessSingleTap() to tell whether we need to activate the target element, + // because in environments where double-tap is enabled, ProcessSingleTap() + // gets called after both the touch-end event and the end-touch notification + // have arrived. } void ActiveElementManager::SetActiveTask( diff --git a/gfx/layers/apz/util/ActiveElementManager.h b/gfx/layers/apz/util/ActiveElementManager.h index f8a6f07261..1f2e1e4aad 100644 --- a/gfx/layers/apz/util/ActiveElementManager.h +++ b/gfx/layers/apz/util/ActiveElementManager.h @@ -9,6 +9,7 @@ #include "nsCOMPtr.h" #include "nsISupportsImpl.h" +#include "mozilla/EnumSet.h" namespace mozilla { @@ -23,6 +24,10 @@ namespace layers { class DelayedClearElementActivation; +namespace apz { +enum class SingleTapState : uint8_t; +} // namespace apz + /** * Manages setting and clearing the ':active' CSS pseudostate in the presence * of touch input. @@ -46,9 +51,9 @@ class ActiveElementManager final { /** * Handle a touch-start state notification from APZ. This notification * may be delayed until after touch listeners have responded to the APZ. - * @param aCanBePan whether the touch can be a pan + * @param aCanBePanOrZoom whether the touch can be a pan or double-tap-to-zoom */ - void HandleTouchStart(bool aCanBePan); + void HandleTouchStart(bool aCanBePanOrZoom); /** * Clear the active element. */ @@ -57,12 +62,12 @@ class ActiveElementManager final { * Handle a touch-end or touch-cancel event. * @param aWasClick whether the touch was a click */ - void HandleTouchEndEvent(bool aWasClick); + bool HandleTouchEndEvent(apz::SingleTapState aState); /** * Handle a touch-end state notification from APZ. This notification may be * delayed until after touch listeners have responded to the APZ. */ - void HandleTouchEnd(); + bool HandleTouchEnd(apz::SingleTapState aState); /** * Possibly clear active element sate in response to a single tap. */ @@ -76,17 +81,39 @@ class ActiveElementManager final { /** * The target of the first touch point in the current touch block. */ - nsCOMPtr<dom::Element> mTarget; + RefPtr<dom::Element> mTarget; /** - * Whether the current touch block can be a pan. Set in HandleTouchStart(). + * Whether the current touch block can be a pan or double-tap-to-zoom. Set in + * HandleTouchStart(). */ - bool mCanBePan; + bool mCanBePanOrZoom; /** - * Whether mCanBePan has been set for the current touch block. + * Whether mCanBePanOrZoom has been set for the current touch block. * We need to keep track of this to allow HandleTouchStart() and * SetTargetElement() to be called in either order.
*/ - bool mCanBePanSet; + bool mCanBePanOrZoomSet; + + bool mSingleTapBeforeActivation; + + enum class TouchEndState : uint8_t { + GotTouchEndNotification, + GotTouchEndEvent, + }; + using TouchEndStates = EnumSet<TouchEndState>; + + /** + * A flag tracks whether `APZStateChange::eEndTouch` notification has arrived + * and whether `eTouchEnd` event has arrived. + */ + TouchEndStates mTouchEndState; + + /** + * A tri-state variable to represent the single tap state when both of + * `APZStateChange::eEndTouch` notification and `eTouchEnd` event arrived. + */ + apz::SingleTapState mSingleTapState; + /** * A task for calling SetActive() after a timeout. */ @@ -103,6 +130,8 @@ class ActiveElementManager final { void ResetTouchBlockState(); void SetActiveTask(const nsCOMPtr<dom::Element>& aTarget); void CancelTask(); + // Returns true if the function changed the active element state. + bool MaybeChangeActiveState(apz::SingleTapState aState); }; } // namespace layers diff --git a/gfx/layers/client/TextureClient.cpp b/gfx/layers/client/TextureClient.cpp index 4187e48955..12a34ff92f 100644 --- a/gfx/layers/client/TextureClient.cpp +++ b/gfx/layers/client/TextureClient.cpp @@ -1546,12 +1546,7 @@ TextureClient::TextureClient(TextureData* aData, TextureFlags aFlags, mUpdated(false), mAddedToCompositableClient(false), mFwdTransactionId(0), - mSerial(++sSerialCounter) -#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL - , - mPoolTracker(nullptr) -#endif -{ + mSerial(++sSerialCounter) { mData->FillInfo(mInfo); mFlags |= mData->GetTextureFlags(); } diff --git a/gfx/layers/client/TextureClient.h b/gfx/layers/client/TextureClient.h index ac0a755698..cf1701e3f0 100644 --- a/gfx/layers/client/TextureClient.h +++ b/gfx/layers/client/TextureClient.h @@ -44,12 +44,6 @@ struct ID3D11Device; namespace mozilla { -// When defined, we track which pool the tile came from and test for -// any inconsistencies. This can be defined in release build as well. -#ifdef DEBUG -# define GFX_DEBUG_TRACK_CLIENTS_IN_POOL 1 -#endif - namespace layers { class AndroidHardwareBufferTextureData; @@ -68,9 +62,6 @@ class GPUVideoTextureData; class TextureClient; class ITextureClientRecycleAllocator; class SharedSurfaceTextureData; -#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL -class TextureClientPool; -#endif class TextureForwarder; struct RemoteTextureOwnerId; @@ -696,11 +687,6 @@ class TextureClient : public AtomicRefCountedWithFinalize<TextureClient> { static void TextureClientRecycleCallback(TextureClient* aClient, void* aClosure); - // Internal helpers for creating texture clients using the actual forwarder - // instead of KnowsCompositor. TextureClientPool uses these to let it cache - // texture clients per-process instead of per ShadowLayerForwarder, but - // everyone else should use the public functions instead. - friend class TextureClientPool; static already_AddRefed<TextureClient> CreateForDrawing( TextureForwarder* aAllocator, gfx::SurfaceFormat aFormat, gfx::IntSize aSize, KnowsCompositor* aKnowsCompositor, @@ -795,12 +781,6 @@ class TextureClient : public AtomicRefCountedWithFinalize<TextureClient> { friend void TestTextureClientYCbCr(TextureClient*, PlanarYCbCrData&); friend already_AddRefed<TextureHost> CreateTextureHostWithBackend( TextureClient*, ISurfaceAllocator*, LayersBackend&); - -#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL - public: - // Pointer to the pool this tile came from. 
- TextureClientPool* mPoolTracker; -#endif }; /** diff --git a/gfx/layers/client/TextureClientPool.cpp b/gfx/layers/client/TextureClientPool.cpp deleted file mode 100644 index 3eb0c908b6..0000000000 --- a/gfx/layers/client/TextureClientPool.cpp +++ /dev/null @@ -1,307 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ -/* vim: set ts=8 sts=2 et sw=2 tw=80: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#include "TextureClientPool.h" -#include "CompositableClient.h" -#include "mozilla/layers/CompositableForwarder.h" -#include "mozilla/layers/TextureForwarder.h" -#include "mozilla/StaticPrefs_layers.h" - -#include "nsComponentManagerUtils.h" - -#define TCP_LOG(...) -// #define TCP_LOG(...) printf_stderr(__VA_ARGS__); - -namespace mozilla { -namespace layers { - -// We want to shrink to our maximum size of N unused tiles -// after a timeout to allow for short-term budget requirements -static void ShrinkCallback(nsITimer* aTimer, void* aClosure) { - static_cast<TextureClientPool*>(aClosure)->ShrinkToMaximumSize(); -} - -// After a certain amount of inactivity, let's clear the pool so that -// we don't hold onto tiles needlessly. In general, allocations are -// cheap enough that re-allocating isn't an issue unless we're allocating -// at an inopportune time (e.g. mid-animation). -static void ClearCallback(nsITimer* aTimer, void* aClosure) { - static_cast<TextureClientPool*>(aClosure)->Clear(); -} - -TextureClientPool::TextureClientPool( - KnowsCompositor* aKnowsCompositor, gfx::SurfaceFormat aFormat, - gfx::IntSize aSize, TextureFlags aFlags, uint32_t aShrinkTimeoutMsec, - uint32_t aClearTimeoutMsec, uint32_t aInitialPoolSize, - uint32_t aPoolUnusedSize, TextureForwarder* aAllocator) - : mKnowsCompositor(aKnowsCompositor), - mFormat(aFormat), - mSize(aSize), - mFlags(aFlags), - mShrinkTimeoutMsec(aShrinkTimeoutMsec), - mClearTimeoutMsec(aClearTimeoutMsec), - mInitialPoolSize(aInitialPoolSize), - mPoolUnusedSize(aPoolUnusedSize), - mOutstandingClients(0), - mSurfaceAllocator(aAllocator), - mDestroyed(false) { - TCP_LOG("TexturePool %p created with maximum unused texture clients %u\n", - this, mInitialPoolSize); - mShrinkTimer = NS_NewTimer(); - mClearTimer = NS_NewTimer(); - if (aFormat == gfx::SurfaceFormat::UNKNOWN) { - gfxWarning() << "Creating texture pool for SurfaceFormat::UNKNOWN format"; - } -} - -TextureClientPool::~TextureClientPool() { - mShrinkTimer->Cancel(); - mClearTimer->Cancel(); -} - -#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL -static bool TestClientPool(const char* what, TextureClient* aClient, - TextureClientPool* aPool) { - if (!aClient || !aPool) { - return false; - } - - TextureClientPool* actual = aClient->mPoolTracker; - bool ok = (actual == aPool); - if (ok) { - ok = (aClient->GetFormat() == aPool->GetFormat()); - } - - if (!ok) { - if (actual) { - gfxCriticalError() << "Pool error(" << what << "): " << aPool << "-" - << aPool->GetFormat() << ", " << actual << "-" - << actual->GetFormat() << ", " << aClient->GetFormat(); - MOZ_CRASH("GFX: Crashing with actual"); - } else { - gfxCriticalError() << "Pool error(" << what << "): " << aPool << "-" - << aPool->GetFormat() << ", nullptr, " - << aClient->GetFormat(); - MOZ_CRASH("GFX: Crashing without actual"); - } - } - return ok; -} -#endif - -already_AddRefed<TextureClient> TextureClientPool::GetTextureClient() { - // Try to 
fetch a client from the pool - RefPtr<TextureClient> textureClient; - - // We initially allocate mInitialPoolSize for our pool. If we run - // out of TextureClients, we allocate additional TextureClients to try and - // keep around mPoolUnusedSize - if (mTextureClients.empty()) { - AllocateTextureClient(); - } - - if (mTextureClients.empty()) { - // All our allocations failed, return nullptr - return nullptr; - } - - mOutstandingClients++; - textureClient = mTextureClients.top(); - mTextureClients.pop(); -#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL - if (textureClient) { - textureClient->mPoolTracker = this; - } - DebugOnly<bool> ok = TestClientPool("fetch", textureClient, this); - MOZ_ASSERT(ok); -#endif - TCP_LOG("TexturePool %p giving %p from pool; size %u outstanding %u\n", this, - textureClient.get(), mTextureClients.size(), mOutstandingClients); - - return textureClient.forget(); -} - -void TextureClientPool::AllocateTextureClient() { - TCP_LOG("TexturePool %p allocating TextureClient, outstanding %u\n", this, - mOutstandingClients); - - TextureAllocationFlags allocFlags = ALLOC_DEFAULT; - - RefPtr<TextureClient> newClient; - if (StaticPrefs::layers_force_shmem_tiles_AtStartup()) { - // gfx::BackendType::NONE means use the content backend - newClient = TextureClient::CreateForRawBufferAccess( - mSurfaceAllocator, mFormat, mSize, gfx::BackendType::NONE, GetBackend(), - mFlags, allocFlags); - } else { - newClient = TextureClient::CreateForDrawing( - mSurfaceAllocator, mFormat, mSize, mKnowsCompositor, - BackendSelector::Content, mFlags, allocFlags); - } - - if (newClient) { - mTextureClients.push(newClient); - } -} - -void TextureClientPool::ResetTimers() { - // Shrink down if we're beyond our maximum size - if (mShrinkTimeoutMsec && - mTextureClients.size() + mTextureClientsDeferred.size() > - mPoolUnusedSize) { - TCP_LOG("TexturePool %p scheduling a shrink-to-max-size\n", this); - mShrinkTimer->InitWithNamedFuncCallback( - ShrinkCallback, this, mShrinkTimeoutMsec, nsITimer::TYPE_ONE_SHOT, - "layers::TextureClientPool::ResetTimers"); - } - - // Clear pool after a period of inactivity to reduce memory consumption - if (mClearTimeoutMsec) { - TCP_LOG("TexturePool %p scheduling a clear\n", this); - mClearTimer->InitWithNamedFuncCallback( - ClearCallback, this, mClearTimeoutMsec, nsITimer::TYPE_ONE_SHOT, - "layers::TextureClientPool::ResetTimers"); - } -} - -void TextureClientPool::ReturnTextureClient(TextureClient* aClient) { - if (!aClient || mDestroyed) { - return; - } -#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL - DebugOnly<bool> ok = TestClientPool("return", aClient, this); - MOZ_ASSERT(ok); -#endif - // Add the client to the pool: - MOZ_ASSERT(mOutstandingClients > mTextureClientsDeferred.size()); - mOutstandingClients--; - mTextureClients.push(aClient); - TCP_LOG("TexturePool %p had client %p returned; size %u outstanding %u\n", - this, aClient, mTextureClients.size(), mOutstandingClients); - - ResetTimers(); -} - -void TextureClientPool::ReturnTextureClientDeferred(TextureClient* aClient) { - if (!aClient || mDestroyed) { - return; - } - MOZ_ASSERT(aClient->HasReadLock()); -#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL - DebugOnly<bool> ok = TestClientPool("defer", aClient, this); - MOZ_ASSERT(ok); -#endif - mTextureClientsDeferred.push_back(aClient); - TCP_LOG( - "TexturePool %p had client %p defer-returned, size %u outstanding %u\n", - this, aClient, mTextureClientsDeferred.size(), mOutstandingClients); - - ResetTimers(); -} - -void TextureClientPool::ShrinkToMaximumSize() { - // We're over our 
desired maximum size, immediately shrink down to the - // maximum. - // - // We cull from the deferred TextureClients first, as we can't reuse those - // until they get returned. - uint32_t totalUnusedTextureClients = - mTextureClients.size() + mTextureClientsDeferred.size(); - - // If we have > mInitialPoolSize outstanding, then we want to keep around - // mPoolUnusedSize at a maximum. If we have fewer than mInitialPoolSize - // outstanding, then keep around the entire initial pool size. - uint32_t targetUnusedClients; - if (mOutstandingClients > mInitialPoolSize) { - targetUnusedClients = mPoolUnusedSize; - } else { - targetUnusedClients = mInitialPoolSize; - } - - TCP_LOG( - "TexturePool %p shrinking to maximum unused size %u; current pool size " - "%u; total outstanding %u\n", - this, targetUnusedClients, totalUnusedTextureClients, - mOutstandingClients); - - while (totalUnusedTextureClients > targetUnusedClients) { - if (!mTextureClientsDeferred.empty()) { - mOutstandingClients--; - TCP_LOG("TexturePool %p dropped deferred client %p; %u remaining\n", this, - mTextureClientsDeferred.front().get(), - mTextureClientsDeferred.size() - 1); - mTextureClientsDeferred.pop_front(); - } else { - TCP_LOG("TexturePool %p dropped non-deferred client %p; %u remaining\n", - this, mTextureClients.top().get(), mTextureClients.size() - 1); - mTextureClients.pop(); - } - totalUnusedTextureClients--; - } -} - -void TextureClientPool::ReturnDeferredClients() { - if (mTextureClientsDeferred.empty()) { - return; - } - - TCP_LOG("TexturePool %p returning %u deferred clients to pool\n", this, - mTextureClientsDeferred.size()); - - ReturnUnlockedClients(); - ShrinkToMaximumSize(); -} - -void TextureClientPool::ReturnUnlockedClients() { - for (auto it = mTextureClientsDeferred.begin(); - it != mTextureClientsDeferred.end();) { - MOZ_ASSERT((*it)->GetNonBlockingReadLockCount() >= 1); - // Last count is held by the lock itself. - if (!(*it)->IsReadLocked()) { - mTextureClients.push(*it); - it = mTextureClientsDeferred.erase(it); - - MOZ_ASSERT(mOutstandingClients > 0); - mOutstandingClients--; - } else { - it++; - } - } -} - -void TextureClientPool::ReportClientLost() { - MOZ_ASSERT(mOutstandingClients > mTextureClientsDeferred.size()); - mOutstandingClients--; - TCP_LOG("TexturePool %p getting report client lost; down to %u outstanding\n", - this, mOutstandingClients); -} - -void TextureClientPool::Clear() { - TCP_LOG("TexturePool %p getting cleared\n", this); - while (!mTextureClients.empty()) { - TCP_LOG("TexturePool %p releasing client %p\n", this, - mTextureClients.top().get()); - mTextureClients.pop(); - } - while (!mTextureClientsDeferred.empty()) { - MOZ_ASSERT(mOutstandingClients > 0); - mOutstandingClients--; - TCP_LOG("TexturePool %p releasing deferred client %p\n", this, - mTextureClientsDeferred.front().get()); - mTextureClientsDeferred.pop_front(); - } -} - -void TextureClientPool::Destroy() { - Clear(); - mDestroyed = true; - mInitialPoolSize = 0; - mPoolUnusedSize = 0; - mKnowsCompositor = nullptr; -} - -} // namespace layers -} // namespace mozilla diff --git a/gfx/layers/client/TextureClientPool.h b/gfx/layers/client/TextureClientPool.h deleted file mode 100644 index d557ca9a03..0000000000 --- a/gfx/layers/client/TextureClientPool.h +++ /dev/null @@ -1,175 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ -/* vim: set ts=8 sts=2 et sw=2 tw=80: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. 
If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#ifndef MOZILLA_GFX_TEXTURECLIENTPOOL_H -#define MOZILLA_GFX_TEXTURECLIENTPOOL_H - -#include "mozilla/gfx/Types.h" -#include "mozilla/gfx/Point.h" -#include "mozilla/RefPtr.h" -#include "mozilla/layers/KnowsCompositor.h" -#include "TextureClient.h" -#include "nsITimer.h" -#include <stack> -#include <list> - -namespace mozilla { -namespace layers { - -class ISurfaceAllocator; -class TextureForwarder; -class TextureReadLock; - -class TextureClientAllocator { - protected: - virtual ~TextureClientAllocator() = default; - - public: - NS_INLINE_DECL_REFCOUNTING(TextureClientAllocator) - - virtual already_AddRefed<TextureClient> GetTextureClient() = 0; - - /** - * Return a TextureClient that is not yet ready to be reused, but will be - * imminently. - */ - virtual void ReturnTextureClientDeferred(TextureClient* aClient) = 0; - - virtual void ReportClientLost() = 0; -}; - -class TextureClientPool final : public TextureClientAllocator { - virtual ~TextureClientPool(); - - public: - TextureClientPool(KnowsCompositor* aKnowsCompositor, - gfx::SurfaceFormat aFormat, gfx::IntSize aSize, - TextureFlags aFlags, uint32_t aShrinkTimeoutMsec, - uint32_t aClearTimeoutMsec, uint32_t aInitialPoolSize, - uint32_t aPoolUnusedSize, TextureForwarder* aAllocator); - - /** - * Gets an allocated TextureClient of size and format that are determined - * by the initialisation parameters given to the pool. This will either be - * a cached client that was returned to the pool, or a newly allocated - * client if one isn't available. - * - * All clients retrieved by this method should be returned using the return - * functions, or reported lost so that the pool can manage its size correctly. - */ - already_AddRefed<TextureClient> GetTextureClient() override; - - /** - * Return a TextureClient that is no longer being used and is ready for - * immediate re-use or destruction. - */ - void ReturnTextureClient(TextureClient* aClient); - - /** - * Return a TextureClient that is not yet ready to be reused, but will be - * imminently. - */ - void ReturnTextureClientDeferred(TextureClient* aClient) override; - - /** - * Return any clients to the pool that were previously returned in - * ReturnTextureClientDeferred. - */ - void ReturnDeferredClients(); - - /** - * Attempt to shrink the pool so that there are no more than - * mInitialPoolSize outstanding. - */ - void ShrinkToMaximumSize(); - - /** - * Report that a client retrieved via GetTextureClient() has become - * unusable, so that it will no longer be tracked. - */ - void ReportClientLost() override; - - /** - * Calling this will cause the pool to attempt to relinquish any unused - * clients. - */ - void Clear(); - - LayersBackend GetBackend() const { - return mKnowsCompositor->GetCompositorBackendType(); - } - int32_t GetMaxTextureSize() const { - return mKnowsCompositor->GetMaxTextureSize(); - } - gfx::SurfaceFormat GetFormat() { return mFormat; } - TextureFlags GetFlags() const { return mFlags; } - - /** - * Clear the pool and put it in a state where it won't recycle any new - * texture. - */ - void Destroy(); - - private: - void ReturnUnlockedClients(); - - /// Allocate a single TextureClient to be returned from the pool. - void AllocateTextureClient(); - - /// Reset and/or initialise timers for shrinking/clearing the pool. - void ResetTimers(); - - /// KnowsCompositor passed to the TextureClient for buffer creation. 
- RefPtr<KnowsCompositor> mKnowsCompositor; - - /// Format is passed to the TextureClient for buffer creation. - gfx::SurfaceFormat mFormat; - - /// The width and height of the tiles to be used. - gfx::IntSize mSize; - - /// Flags passed to the TextureClient for buffer creation. - const TextureFlags mFlags; - - /// How long to wait after a TextureClient is returned before trying - /// to shrink the pool to its maximum size of mPoolUnusedSize. - uint32_t mShrinkTimeoutMsec; - - /// How long to wait after a TextureClient is returned before trying - /// to clear the pool. - uint32_t mClearTimeoutMsec; - - // The initial number of unused texture clients to seed the pool with - // on construction - uint32_t mInitialPoolSize; - - // How many unused texture clients to try and keep around if we go over - // the initial allocation - uint32_t mPoolUnusedSize; - - /// This is a total number of clients in the wild and in the stack of - /// deferred clients (see below). So, the total number of clients in - /// existence is always mOutstandingClients + the size of mTextureClients. - uint32_t mOutstandingClients; - - std::stack<RefPtr<TextureClient>> mTextureClients; - - std::list<RefPtr<TextureClient>> mTextureClientsDeferred; - RefPtr<nsITimer> mShrinkTimer; - RefPtr<nsITimer> mClearTimer; - // This mSurfaceAllocator owns us, so no need to hold a ref to it - TextureForwarder* mSurfaceAllocator; - - // Keep track of whether this pool has been destroyed or not. If it has, - // we won't accept returns of TextureClients anymore, and the refcounting - // should take care of their destruction. - bool mDestroyed; -}; - -} // namespace layers -} // namespace mozilla - -#endif /* MOZILLA_GFX_TEXTURECLIENTPOOL_H */ diff --git a/gfx/layers/client/TextureRecorded.cpp b/gfx/layers/client/TextureRecorded.cpp index da4ca4f8f3..b3596efdc3 100644 --- a/gfx/layers/client/TextureRecorded.cpp +++ b/gfx/layers/client/TextureRecorded.cpp @@ -33,7 +33,7 @@ RecordedTextureData::~RecordedTextureData() { // We need the translator to drop its reference for the DrawTarget first, // because the TextureData might need to destroy its DrawTarget within a lock. mSnapshot = nullptr; - mSnapshotWrapper = nullptr; + DetachSnapshotWrapper(); mDT = nullptr; mCanvasChild->CleanupTexture(mTextureId); mCanvasChild->RecordEvent(RecordedTextureDestruction( @@ -92,10 +92,25 @@ bool RecordedTextureData::Lock(OpenMode aMode) { return true; } +void RecordedTextureData::DetachSnapshotWrapper(bool aInvalidate, + bool aRelease) { + if (mSnapshotWrapper) { + // If the snapshot only has one ref, then we don't need to worry about + // copying before invalidation since it is about to be deleted. Otherwise, + // we need to ensure any internal data is appropriately copied before + // shmems are potentially overwritten if there are still existing users. + mCanvasChild->DetachSurface(mSnapshotWrapper, + aInvalidate && !mSnapshotWrapper->hasOneRef()); + if (aRelease) { + mSnapshotWrapper = nullptr; + } + } +} + void RecordedTextureData::Unlock() { if ((mLockedMode == OpenMode::OPEN_READ_WRITE) && mCanvasChild->ShouldCacheDataSurface()) { - mSnapshotWrapper = nullptr; + DetachSnapshotWrapper(); mSnapshot = mDT->Snapshot(); mDT->DetachAllSnapshots(); mCanvasChild->RecordEvent(RecordedCacheDataSurface(mSnapshot.get())); @@ -108,11 +123,9 @@ void RecordedTextureData::Unlock() { already_AddRefed<gfx::DrawTarget> RecordedTextureData::BorrowDrawTarget() { if (mLockedMode & OpenMode::OPEN_WRITE) { + // The snapshot will be invalidated. 
mSnapshot = nullptr; - if (mSnapshotWrapper) { - mCanvasChild->DetachSurface(mSnapshotWrapper); - mSnapshotWrapper = nullptr; - } + DetachSnapshotWrapper(true); } return do_AddRef(mDT); } @@ -122,18 +135,22 @@ void RecordedTextureData::EndDraw() { MOZ_ASSERT(mLockedMode == OpenMode::OPEN_READ_WRITE); if (mCanvasChild->ShouldCacheDataSurface()) { - mSnapshotWrapper = nullptr; + DetachSnapshotWrapper(); mSnapshot = mDT->Snapshot(); mCanvasChild->RecordEvent(RecordedCacheDataSurface(mSnapshot.get())); } } already_AddRefed<gfx::SourceSurface> RecordedTextureData::BorrowSnapshot() { - if (mSnapshotWrapper && (!mDT || !mDT->IsDirty())) { - // The DT is unmodified since the last time snapshot was borrowed, so it - // is safe to reattach the snapshot for shmem readbacks. - mCanvasChild->AttachSurface(mSnapshotWrapper); - return do_AddRef(mSnapshotWrapper); + if (mSnapshotWrapper) { + if (!mDT || !mDT->IsDirty()) { + // The DT is unmodified since the last time snapshot was borrowed, so it + // is safe to reattach the snapshot for shmem readbacks. + mCanvasChild->AttachSurface(mSnapshotWrapper); + return do_AddRef(mSnapshotWrapper); + } + + DetachSnapshotWrapper(); } // There are some failure scenarios where we have no DrawTarget and @@ -153,9 +170,10 @@ already_AddRefed<gfx::SourceSurface> RecordedTextureData::BorrowSnapshot() { void RecordedTextureData::ReturnSnapshot( already_AddRefed<gfx::SourceSurface> aSnapshot) { RefPtr<gfx::SourceSurface> snapshot = aSnapshot; - if (mSnapshotWrapper) { - mCanvasChild->DetachSurface(mSnapshotWrapper); - } + // The snapshot needs to be marked detached but we keep the wrapper around + // so that it can be reused without repeatedly creating it and accidentally + // reading back data for each new instantiation. + DetachSnapshotWrapper(false, false); } void RecordedTextureData::Deallocate(LayersIPCChannel* aAllocator) {} diff --git a/gfx/layers/client/TextureRecorded.h b/gfx/layers/client/TextureRecorded.h index 56e504fb54..9e4e69e78d 100644 --- a/gfx/layers/client/TextureRecorded.h +++ b/gfx/layers/client/TextureRecorded.h @@ -58,6 +58,8 @@ class RecordedTextureData final : public TextureData { ~RecordedTextureData() override; + void DetachSnapshotWrapper(bool aInvalidate = false, bool aRelease = true); + int64_t mTextureId; RefPtr<CanvasChild> mCanvasChild; gfx::IntSize mSize; diff --git a/gfx/layers/ipc/CanvasChild.cpp b/gfx/layers/ipc/CanvasChild.cpp index 515463cd8e..a25d5e6799 100644 --- a/gfx/layers/ipc/CanvasChild.cpp +++ b/gfx/layers/ipc/CanvasChild.cpp @@ -133,6 +133,16 @@ class SourceSurfaceCanvasRecording final : public gfx::SourceSurface { void AttachSurface() { mDetached = false; } void DetachSurface() { mDetached = true; } + void InvalidateDataSurface() { + if (mDataSourceSurface && mMayInvalidate) { + // This must be the only reference to the data left. + MOZ_ASSERT(mDataSourceSurface->hasOneRef()); + mDataSourceSurface = + gfx::Factory::CopyDataSourceSurface(mDataSourceSurface); + mMayInvalidate = false; + } + } + already_AddRefed<gfx::SourceSurface> ExtractSubrect( const gfx::IntRect& aRect) final { return mRecordedSurface->ExtractSubrect(aRect); @@ -142,8 +152,8 @@ class SourceSurfaceCanvasRecording final : public gfx::SourceSurface { void EnsureDataSurfaceOnMainThread() { // The data can only be retrieved on the main thread. 
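The aMayInvalidate out-parameter threaded through GetDataSurface() just below tells the caller whether the returned data aliases recorder shmem (and can therefore be overwritten by later canvas work) or is already an independent copy. A hypothetical caller-side helper illustrating that contract; the helper name and its eager-copy policy are assumptions for illustration, not part of this patch:

// Assumes the Gecko headers for CanvasChild, gfx::Factory and friends.
static already_AddRefed<gfx::DataSourceSurface> GetStableDataSurface(
    CanvasChild* aChild, int64_t aTextureId,
    const gfx::SourceSurface* aSurface) {
  bool mayInvalidate = false;
  RefPtr<gfx::DataSourceSurface> data = aChild->GetDataSurface(
      aTextureId, aSurface, /* aDetached */ false, mayInvalidate);
  if (data && mayInvalidate) {
    // Shmem-backed data may be overwritten by future canvas changes, so take
    // a private copy before handing it out to longer-lived consumers.
    data = gfx::Factory::CopyDataSourceSurface(data);
  }
  return data.forget();
}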
if (!mDataSourceSurface && NS_IsMainThread()) { - mDataSourceSurface = - mCanvasChild->GetDataSurface(mTextureId, mRecordedSurface, mDetached); + mDataSourceSurface = mCanvasChild->GetDataSurface( + mTextureId, mRecordedSurface, mDetached, mMayInvalidate); } } @@ -167,6 +177,7 @@ class SourceSurfaceCanvasRecording final : public gfx::SourceSurface { RefPtr<CanvasDrawEventRecorder> mRecorder; RefPtr<gfx::DataSourceSurface> mDataSourceSurface; bool mDetached = false; + bool mMayInvalidate = false; }; class CanvasDataShmemHolder { @@ -420,6 +431,7 @@ already_AddRefed<gfx::DrawTargetRecording> CanvasChild::CreateDrawTarget( gfx::BackendType::SKIA, gfx::IntSize(1, 1), aFormat); RefPtr<gfx::DrawTargetRecording> dt = MakeAndAddRef<gfx::DrawTargetRecording>( mRecorder, aTextureId, aTextureOwnerId, dummyDt, aSize); + dt->SetOptimizeTransform(true); mTextureInfo.insert({aTextureId, {}}); @@ -483,7 +495,8 @@ int64_t CanvasChild::CreateCheckpoint() { } already_AddRefed<gfx::DataSourceSurface> CanvasChild::GetDataSurface( - int64_t aTextureId, const gfx::SourceSurface* aSurface, bool aDetached) { + int64_t aTextureId, const gfx::SourceSurface* aSurface, bool aDetached, + bool& aMayInvalidate) { NS_ASSERT_OWNINGTHREAD(CanvasChild); MOZ_ASSERT(aSurface); @@ -527,6 +540,7 @@ already_AddRefed<gfx::DataSourceSurface> CanvasChild::GetDataSurface( gfx::Factory::CreateWrappingDataSourceSurface( shmemPtr, stride, ssSize, ssFormat, ReleaseDataShmemHolder, closure); + aMayInvalidate = true; return dataSurface.forget(); } } @@ -556,6 +570,7 @@ already_AddRefed<gfx::DataSourceSurface> CanvasChild::GetDataSurface( RefPtr<gfx::DataSourceSurface> dataSurface = gfx::Factory::CreateWrappingDataSourceSurface( data, stride, ssSize, ssFormat, ReleaseDataShmemHolder, closure); + aMayInvalidate = false; return dataSurface.forget(); } @@ -593,10 +608,14 @@ void CanvasChild::AttachSurface(const RefPtr<gfx::SourceSurface>& aSurface) { } } -void CanvasChild::DetachSurface(const RefPtr<gfx::SourceSurface>& aSurface) { +void CanvasChild::DetachSurface(const RefPtr<gfx::SourceSurface>& aSurface, + bool aInvalidate) { if (auto* surface = static_cast<SourceSurfaceCanvasRecording*>(aSurface.get())) { surface->DetachSurface(); + if (aInvalidate) { + surface->InvalidateDataSurface(); + } } } diff --git a/gfx/layers/ipc/CanvasChild.h b/gfx/layers/ipc/CanvasChild.h index e22109f406..a0cf22b0ec 100644 --- a/gfx/layers/ipc/CanvasChild.h +++ b/gfx/layers/ipc/CanvasChild.h @@ -22,7 +22,7 @@ class ThreadSafeWorkerRef; namespace gfx { class DrawTargetRecording; class SourceSurface; -} +} // namespace gfx namespace layers { class CanvasDrawEventRecorder; @@ -132,7 +132,8 @@ class CanvasChild final : public PCanvasChild, public SupportsWeakPtr { /** * The DrawTargetRecording is about to change, so detach the old snapshot. 
*/ - void DetachSurface(const RefPtr<gfx::SourceSurface>& aSurface); + void DetachSurface(const RefPtr<gfx::SourceSurface>& aSurface, + bool aInvalidate = false); /** * Get DataSourceSurface from the translated equivalent version of aSurface in @@ -141,11 +142,13 @@ class CanvasChild final : public PCanvasChild, public SupportsWeakPtr { * @param aSurface the SourceSurface in this process for which we need a * DataSourceSurface * @param aDetached whether the surface is old + * @param aMayInvalidate whether the data may be invalidated by future changes * @returns a DataSourceSurface created from data for aSurface retrieve from * GPU process */ already_AddRefed<gfx::DataSourceSurface> GetDataSurface( - int64_t aTextureId, const gfx::SourceSurface* aSurface, bool aDetached); + int64_t aTextureId, const gfx::SourceSurface* aSurface, bool aDetached, + bool& aMayInvalidate); bool RequiresRefresh(int64_t aTextureId) const; diff --git a/gfx/layers/ipc/CanvasTranslator.cpp b/gfx/layers/ipc/CanvasTranslator.cpp index 4a184f48d8..3fd7a4c4c4 100644 --- a/gfx/layers/ipc/CanvasTranslator.cpp +++ b/gfx/layers/ipc/CanvasTranslator.cpp @@ -21,6 +21,7 @@ #include "mozilla/layers/ImageDataSerializer.h" #include "mozilla/layers/SharedSurfacesParent.h" #include "mozilla/layers/TextureClient.h" +#include "mozilla/layers/VideoBridgeParent.h" #include "mozilla/StaticPrefs_gfx.h" #include "mozilla/SyncRunnable.h" #include "mozilla/TaskQueue.h" @@ -112,6 +113,8 @@ static bool CreateAndMapShmem(RefPtr<ipc::SharedMemoryBasic>& aShmem, return true; } +StaticRefPtr<gfx::SharedContextWebgl> CanvasTranslator::sSharedContext; + bool CanvasTranslator::EnsureSharedContextWebgl() { if (!mSharedContext || mSharedContext->IsContextLost()) { if (mSharedContext) { @@ -121,7 +124,14 @@ bool CanvasTranslator::EnsureSharedContextWebgl() { mRemoteTextureOwner->ClearRecycledTextures(); } } - mSharedContext = gfx::SharedContextWebgl::Create(); + // Check if the global shared context is still valid. If not, instantiate + // a new one before we try to use it. + if (!sSharedContext || sSharedContext->IsContextLost()) { + sSharedContext = gfx::SharedContextWebgl::Create(); + } + mSharedContext = sSharedContext; + // If we can't get a new context, then the only thing left to do is block + // new canvases. if (!mSharedContext || mSharedContext->IsContextLost()) { mSharedContext = nullptr; BlockCanvas(); @@ -131,6 +141,13 @@ bool CanvasTranslator::EnsureSharedContextWebgl() { return true; } +void CanvasTranslator::Shutdown() { + if (sSharedContext) { + gfx::CanvasRenderThread::Dispatch(NS_NewRunnableFunction( + "CanvasTranslator::Shutdown", []() { sSharedContext = nullptr; })); + } +} + mozilla::ipc::IPCResult CanvasTranslator::RecvInitTranslator( TextureType aTextureType, TextureType aWebglTextureType, gfx::BackendType aBackendType, Handle&& aReadHandle, @@ -1144,6 +1161,13 @@ void CanvasTranslator::ClearTextureInfo() { mTextureInfo.clear(); mDrawTargets.Clear(); mSharedContext = nullptr; + // If the global shared context's ref is the last ref left, then clear out + // any internal caches and textures from the context, but still keep it + // alive. This saves on startup costs while not contributing significantly + // to memory usage. 
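EnsureSharedContextWebgl() above now hands every CanvasTranslator the same process-wide SharedContextWebgl. A condensed sketch of that lifecycle using the names from the patch (acquire lazily, recreate only after a context loss, and drop internal caches once nothing but the static reference remains, as the hunk just below does); this is an illustration, not the translator code itself:

static StaticRefPtr<gfx::SharedContextWebgl> sSharedContext;

// Reuse the global context while it is healthy; recreate it after a loss.
RefPtr<gfx::SharedContextWebgl> AcquireSharedContextWebgl() {
  if (!sSharedContext || sSharedContext->IsContextLost()) {
    sSharedContext = gfx::SharedContextWebgl::Create();  // may return nullptr
  }
  return RefPtr<gfx::SharedContextWebgl>(sSharedContext.get());
}

// Keep the context alive for the next canvas, but free its caches/textures
// when no translator is currently using it.
void ReleaseSharedContextWebgl() {
  if (sSharedContext && sSharedContext->hasOneRef()) {
    sSharedContext->ClearCaches();
  }
}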
+ if (sSharedContext && sSharedContext->hasOneRef()) { + sSharedContext->ClearCaches(); + } mBaseDT = nullptr; if (mReferenceTextureData) { mReferenceTextureData->Unlock(); @@ -1163,6 +1187,46 @@ already_AddRefed<gfx::SourceSurface> CanvasTranslator::LookupExternalSurface( return mSharedSurfacesHolder->Get(wr::ToExternalImageId(aKey)); } +// Check if the surface descriptor describes a GPUVideo texture for which we +// only have an opaque source/handle from SurfaceDescriptorRemoteDecoder to +// derive the actual texture from. +static bool SDIsNullRemoteDecoder(const SurfaceDescriptor& sd) { + return sd.type() == SurfaceDescriptor::TSurfaceDescriptorGPUVideo && + sd.get_SurfaceDescriptorGPUVideo() + .get_SurfaceDescriptorRemoteDecoder() + .subdesc() + .type() == RemoteDecoderVideoSubDescriptor::Tnull_t; +} + +already_AddRefed<gfx::SourceSurface> +CanvasTranslator::LookupSourceSurfaceFromSurfaceDescriptor( + const SurfaceDescriptor& aDesc) { + if (!SDIsNullRemoteDecoder(aDesc)) { + return nullptr; + } + + const auto& sdrd = aDesc.get_SurfaceDescriptorGPUVideo() + .get_SurfaceDescriptorRemoteDecoder(); + RefPtr<VideoBridgeParent> parent = + VideoBridgeParent::GetSingleton(sdrd.source()); + if (!parent) { + MOZ_ASSERT_UNREACHABLE("unexpected to be called"); + gfxCriticalNote << "TexUnpackSurface failed to get VideoBridgeParent"; + return nullptr; + } + RefPtr<TextureHost> texture = + parent->LookupTexture(mContentId, sdrd.handle()); + if (!texture) { + MOZ_ASSERT_UNREACHABLE("unexpected to be called"); + gfxCriticalNote << "TexUnpackSurface failed to get TextureHost"; + return nullptr; + } + + RefPtr<gfx::DataSourceSurface> surf = texture->GetAsSurface(); + + return surf.forget(); +} + void CanvasTranslator::CheckpointReached() { CheckAndSignalWriter(); } void CanvasTranslator::PauseTranslation() { diff --git a/gfx/layers/ipc/CanvasTranslator.h b/gfx/layers/ipc/CanvasTranslator.h index 5258e0c529..e2b6c587b4 100644 --- a/gfx/layers/ipc/CanvasTranslator.h +++ b/gfx/layers/ipc/CanvasTranslator.h @@ -219,6 +219,9 @@ class CanvasTranslator final : public gfx::InlineTranslator, already_AddRefed<gfx::SourceSurface> LookupExternalSurface( uint64_t aKey) final; + already_AddRefed<gfx::SourceSurface> LookupSourceSurfaceFromSurfaceDescriptor( + const SurfaceDescriptor& aDesc) final; + /** * Gets the cached DataSourceSurface, if it exists, associated with a * SourceSurface from another process. 
@@ -274,6 +277,8 @@ class CanvasTranslator final : public gfx::InlineTranslator, void GetDataSurface(uint64_t aSurfaceRef); + static void Shutdown(); + private: ~CanvasTranslator(); @@ -333,6 +338,7 @@ class CanvasTranslator final : public gfx::InlineTranslator, #if defined(XP_WIN) RefPtr<ID3D11Device> mDevice; #endif + static StaticRefPtr<gfx::SharedContextWebgl> sSharedContext; RefPtr<gfx::SharedContextWebgl> mSharedContext; RefPtr<RemoteTextureOwnerClient> mRemoteTextureOwner; diff --git a/gfx/layers/ipc/CompositorBridgeChild.cpp b/gfx/layers/ipc/CompositorBridgeChild.cpp index 070e6d673e..83374e3d30 100644 --- a/gfx/layers/ipc/CompositorBridgeChild.cpp +++ b/gfx/layers/ipc/CompositorBridgeChild.cpp @@ -18,8 +18,7 @@ #include "mozilla/layers/CanvasChild.h" #include "mozilla/layers/WebRenderLayerManager.h" #include "mozilla/layers/PTextureChild.h" -#include "mozilla/layers/TextureClient.h" // for TextureClient -#include "mozilla/layers/TextureClientPool.h" // for TextureClientPool +#include "mozilla/layers/TextureClient.h" // for TextureClient #include "mozilla/layers/WebRenderBridgeChild.h" #include "mozilla/layers/SyncObject.h" // for SyncObjectClient #include "mozilla/gfx/CanvasManagerChild.h" @@ -135,10 +134,6 @@ void CompositorBridgeChild::Destroy() { // happens. RefPtr<CompositorBridgeChild> selfRef = this; - for (size_t i = 0; i < mTexturePools.Length(); i++) { - mTexturePools[i]->Destroy(); - } - if (mSectionAllocator) { delete mSectionAllocator; mSectionAllocator = nullptr; @@ -275,9 +270,6 @@ bool CompositorBridgeChild::CompositorIsInGPUProcess() { mozilla::ipc::IPCResult CompositorBridgeChild::RecvDidComposite( const LayersId& aId, const nsTArray<TransactionId>& aTransactionIds, const TimeStamp& aCompositeStart, const TimeStamp& aCompositeEnd) { - // Hold a reference to keep texture pools alive. 
See bug 1387799 - const auto texturePools = mTexturePools.Clone(); - for (const auto& id : aTransactionIds) { if (mLayerManager) { MOZ_ASSERT(!aId.IsValid()); @@ -293,10 +285,6 @@ mozilla::ipc::IPCResult CompositorBridgeChild::RecvDidComposite( } } - for (size_t i = 0; i < texturePools.Length(); i++) { - texturePools[i]->ReturnDeferredClients(); - } - return IPC_OK(); } diff --git a/gfx/layers/ipc/CompositorBridgeChild.h b/gfx/layers/ipc/CompositorBridgeChild.h index 7e0a4799fe..7ac7ccc197 100644 --- a/gfx/layers/ipc/CompositorBridgeChild.h +++ b/gfx/layers/ipc/CompositorBridgeChild.h @@ -45,7 +45,6 @@ class CompositorManagerChild; class CompositorOptions; class WebRenderLayerManager; class TextureClient; -class TextureClientPool; struct FrameMetrics; struct FwdTransactionCounter; @@ -233,8 +232,6 @@ class CompositorBridgeChild final : public PCompositorBridgeChild, nsCOMPtr<nsISerialEventTarget> mThread; - AutoTArray<RefPtr<TextureClientPool>, 2> mTexturePools; - uint64_t mProcessToken; FixedSizeSmallShmemSectionAllocator* mSectionAllocator; diff --git a/gfx/layers/ipc/CompositorBridgeParent.cpp b/gfx/layers/ipc/CompositorBridgeParent.cpp index 3982791357..767ea47b2f 100644 --- a/gfx/layers/ipc/CompositorBridgeParent.cpp +++ b/gfx/layers/ipc/CompositorBridgeParent.cpp @@ -1777,20 +1777,20 @@ int32_t RecordContentFrameTime( ContentFrameMarker{}); } - mozilla::glean::gfx_content_frame_time::from_paint.AccumulateSamples( - {static_cast<unsigned long long>(fracLatencyNorm)}); + mozilla::glean::gfx_content_frame_time::from_paint.AccumulateSingleSample( + static_cast<unsigned long long>(fracLatencyNorm)); if (!(aTxnId == VsyncId()) && aVsyncStart) { latencyMs = (aCompositeEnd - aVsyncStart).ToMilliseconds(); latencyNorm = latencyMs / aVsyncRate.ToMilliseconds(); fracLatencyNorm = lround(latencyNorm * 100.0); int32_t result = fracLatencyNorm; - mozilla::glean::gfx_content_frame_time::from_vsync.AccumulateSamples( - {static_cast<unsigned long long>(fracLatencyNorm)}); + mozilla::glean::gfx_content_frame_time::from_vsync.AccumulateSingleSample( + static_cast<unsigned long long>(fracLatencyNorm)); if (aContainsSVGGroup) { - mozilla::glean::gfx_content_frame_time::with_svg.AccumulateSamples( - {static_cast<unsigned long long>(fracLatencyNorm)}); + mozilla::glean::gfx_content_frame_time::with_svg.AccumulateSingleSample( + static_cast<unsigned long long>(fracLatencyNorm)); } // Record CONTENT_FRAME_TIME_REASON. 
@@ -1889,8 +1889,8 @@ int32_t RecordContentFrameTime( fracLatencyNorm = lround(latencyNorm * 100.0); } mozilla::glean::gfx_content_frame_time::without_resource_upload - .AccumulateSamples( - {static_cast<unsigned long long>(fracLatencyNorm)}); + .AccumulateSingleSample( + static_cast<unsigned long long>(fracLatencyNorm)); if (aStats) { latencyMs -= (double(aStats->gpu_cache_upload_time) / 1000000.0); @@ -1898,8 +1898,8 @@ int32_t RecordContentFrameTime( fracLatencyNorm = lround(latencyNorm * 100.0); } mozilla::glean::gfx_content_frame_time::without_resource_upload - .AccumulateSamples( - {static_cast<unsigned long long>(fracLatencyNorm)}); + .AccumulateSingleSample( + static_cast<unsigned long long>(fracLatencyNorm)); } return result; } diff --git a/gfx/layers/ipc/LayersMessageUtils.h b/gfx/layers/ipc/LayersMessageUtils.h index a4e9557ac3..a0da6154d5 100644 --- a/gfx/layers/ipc/LayersMessageUtils.h +++ b/gfx/layers/ipc/LayersMessageUtils.h @@ -18,6 +18,7 @@ #include "ipc/IPCMessageUtils.h" #include "mozilla/ScrollSnapInfo.h" #include "mozilla/ServoBindings.h" +#include "mozilla/dom/WebGLIpdl.h" #include "mozilla/ipc/ByteBuf.h" #include "mozilla/ipc/ProtocolMessageUtils.h" #include "mozilla/layers/APZInputBridge.h" @@ -48,15 +49,11 @@ namespace IPC { template <> struct ParamTraits<mozilla::layers::LayersId> - : public PlainOldDataSerializer<mozilla::layers::LayersId> {}; + : public ParamTraits_TiedFields<mozilla::layers::LayersId> {}; template <typename T> struct ParamTraits<mozilla::layers::BaseTransactionId<T>> - : public PlainOldDataSerializer<mozilla::layers::BaseTransactionId<T>> {}; - -template <> -struct ParamTraits<mozilla::VsyncId> - : public PlainOldDataSerializer<mozilla::VsyncId> {}; + : public ParamTraits_TiedFields<mozilla::layers::BaseTransactionId<T>> {}; template <> struct ParamTraits<mozilla::VsyncEvent> { @@ -419,7 +416,7 @@ struct ParamTraits<mozilla::StyleScrollSnapStop> template <> struct ParamTraits<mozilla::ScrollSnapTargetId> - : public PlainOldDataSerializer<mozilla::ScrollSnapTargetId> {}; + : public ParamTraits_IsEnumCase<mozilla::ScrollSnapTargetId> {}; template <> struct ParamTraits<mozilla::SnapPoint> { @@ -495,26 +492,12 @@ struct ParamTraits<mozilla::ScrollSnapInfo> { }; template <> -struct ParamTraits<mozilla::layers::OverscrollBehaviorInfo> { - // Not using PlainOldDataSerializer so we get enum validation - // for the members. 
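The ParamTraits changes in this hunk and in the SharedSurfacesMemoryReport one below replace PlainOldDataSerializer with ParamTraits_TiedFields, which serializes exactly the members a type exposes through MutTiedFields() and, unlike a raw memcpy, lets padding be declared explicitly (see the PaddingField added to SurfaceEntry). A sketch with a hypothetical type following that pattern; ExampleEntry is not a real Gecko type and the exact TiedFields requirements are assumed from this hunk:

#include <cstdint>
#include <tuple>
#include "mozilla/dom/WebGLIpdl.h"  // ParamTraits_TiedFields, as used in this patch

struct ExampleEntry {
  uint32_t mId = 0;
  uint32_t mCount = 0;

  // TiedFields serializes exactly these members, in this order.
  auto MutTiedFields() { return std::tie(mId, mCount); }
};

namespace IPC {
template <>
struct ParamTraits<ExampleEntry>
    : public ParamTraits_TiedFields<ExampleEntry> {};
}  // namespace IPC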
- - typedef mozilla::layers::OverscrollBehaviorInfo paramType; - - static void Write(MessageWriter* aWriter, const paramType& aParam) { - WriteParam(aWriter, aParam.mBehaviorX); - WriteParam(aWriter, aParam.mBehaviorY); - } - - static bool Read(MessageReader* aReader, paramType* aResult) { - return (ReadParam(aReader, &aResult->mBehaviorX) && - ReadParam(aReader, &aResult->mBehaviorY)); - } -}; +struct ParamTraits<mozilla::layers::OverscrollBehaviorInfo> + : public ParamTraits_TiedFields<mozilla::layers::OverscrollBehaviorInfo> {}; template <typename T> struct ParamTraits<mozilla::ScrollGeneration<T>> - : PlainOldDataSerializer<mozilla::ScrollGeneration<T>> {}; + : public ParamTraits_TiedFields<mozilla::ScrollGeneration<T>> {}; template <> struct ParamTraits<mozilla::ScrollUpdateType> diff --git a/gfx/layers/ipc/SharedSurfacesMemoryReport.h b/gfx/layers/ipc/SharedSurfacesMemoryReport.h index 81baf1349f..31e27bccad 100644 --- a/gfx/layers/ipc/SharedSurfacesMemoryReport.h +++ b/gfx/layers/ipc/SharedSurfacesMemoryReport.h @@ -12,6 +12,7 @@ #include "base/process.h" #include "ipc/IPCMessageUtils.h" #include "ipc/IPCMessageUtilsSpecializations.h" +#include "mozilla/dom/WebGLIpdl.h" #include "mozilla/gfx/Point.h" // for IntSize namespace mozilla { @@ -26,8 +27,16 @@ class SharedSurfacesMemoryReport final { int32_t mStride; uint32_t mConsumers; bool mCreatorRef; + PaddingField<bool, 3> _padding; + + auto MutTiedFields() { + return std::tie(mCreatorPid, mSize, mStride, mConsumers, mCreatorRef, + _padding); + } }; + auto MutTiedFields() { return std::tie(mSurfaces); } + std::unordered_map<uint64_t, SurfaceEntry> mSurfaces; }; @@ -37,21 +46,13 @@ class SharedSurfacesMemoryReport final { namespace IPC { template <> -struct ParamTraits<mozilla::layers::SharedSurfacesMemoryReport> { - typedef mozilla::layers::SharedSurfacesMemoryReport paramType; - - static void Write(MessageWriter* aWriter, const paramType& aParam) { - WriteParam(aWriter, aParam.mSurfaces); - } - - static bool Read(MessageReader* aReader, paramType* aResult) { - return ReadParam(aReader, &aResult->mSurfaces); - } -}; +struct ParamTraits<mozilla::layers::SharedSurfacesMemoryReport> + : public ParamTraits_TiedFields< + mozilla::layers::SharedSurfacesMemoryReport> {}; template <> struct ParamTraits<mozilla::layers::SharedSurfacesMemoryReport::SurfaceEntry> - : public PlainOldDataSerializer< + : public ParamTraits_TiedFields< mozilla::layers::SharedSurfacesMemoryReport::SurfaceEntry> {}; } // namespace IPC diff --git a/gfx/layers/moz.build b/gfx/layers/moz.build index 384611b68b..176e6b54a4 100644 --- a/gfx/layers/moz.build +++ b/gfx/layers/moz.build @@ -125,7 +125,6 @@ EXPORTS.mozilla.layers += [ "client/GPUVideoTextureClient.h", "client/ImageClient.h", "client/TextureClient.h", - "client/TextureClientPool.h", "client/TextureClientRecycleAllocator.h", "client/TextureClientSharedSurface.h", "client/TextureRecorded.h", @@ -354,7 +353,6 @@ UNIFIED_SOURCES += [ "client/CompositableClient.cpp", "client/GPUVideoTextureClient.cpp", "client/ImageClient.cpp", - "client/TextureClientPool.cpp", "client/TextureClientRecycleAllocator.cpp", "client/TextureClientSharedSurface.cpp", "client/TextureRecorded.cpp", diff --git a/gfx/layers/wr/StackingContextHelper.cpp b/gfx/layers/wr/StackingContextHelper.cpp index d5ffbf26a0..2c0627f63f 100644 --- a/gfx/layers/wr/StackingContextHelper.cpp +++ b/gfx/layers/wr/StackingContextHelper.cpp @@ -272,5 +272,14 @@ Maybe<gfx::Matrix4x4> StackingContextHelper::GetDeferredTransformMatrix() } } +void 
StackingContextHelper::ClearDeferredTransformItem() const { + mDeferredTransformItem = nullptr; +} + +void StackingContextHelper::RestoreDeferredTransformItem( + nsDisplayTransform* aItem) const { + mDeferredTransformItem = aItem; +} + } // namespace layers } // namespace mozilla diff --git a/gfx/layers/wr/StackingContextHelper.h b/gfx/layers/wr/StackingContextHelper.h index 368449a2fd..8239b2db0d 100644 --- a/gfx/layers/wr/StackingContextHelper.h +++ b/gfx/layers/wr/StackingContextHelper.h @@ -56,6 +56,12 @@ class MOZ_RAII StackingContextHelper { nsDisplayTransform* GetDeferredTransformItem() const; Maybe<gfx::Matrix4x4> GetDeferredTransformMatrix() const; + // Functions for temporarily clearing and restoring the deferred + // transform item during WebRender display list building. These are + // used to ensure deferred transforms are not applied in duplicate + // to nested nodes in the WebRenderScrollData tree. + void ClearDeferredTransformItem() const; + void RestoreDeferredTransformItem(nsDisplayTransform* aItem) const; bool AffectsClipPositioning() const { return mAffectsClipPositioning; } Maybe<wr::WrSpatialId> ReferenceFrameId() const { return mReferenceFrameId; } @@ -105,19 +111,19 @@ class MOZ_RAII StackingContextHelper { // item (i.e. the closest ancestor nsDisplayTransform item of the item that // created this StackingContextHelper). And then we use // mDeferredAncestorTransform to store the product of all the other transforms - // that were deferred. As a result, there is an invariant here that if - // mDeferredTransformItem is nullptr, mDeferredAncestorTransform will also - // be Nothing(). Note that we can only do this if the nsDisplayTransform items - // share the same ASR. If we are processing an nsDisplayTransform item with a - // different ASR than the previously-deferred item, we assume that the - // previously-deferred transform will get sent to APZ as part of a separate - // WebRenderLayerScrollData item, and so we don't need to bother with any - // merging. (The merging probably wouldn't even make sense because the - // coordinate spaces might be different in the face of async scrolling). This - // behaviour of forcing a WebRenderLayerScrollData item to be generated when - // the ASR changes is implemented in + // that were deferred. Note that this means we only need to look at + // mDeferredAncestorTransform if mDeferredTransformItem is set. Note that we + // can only do this if the nsDisplayTransform items share the same ASR. If we + // are processing an nsDisplayTransform item with a different ASR than the + // previously-deferred item, we assume that the previously-deferred transform + // will get sent to APZ as part of a separate WebRenderLayerScrollData item, + // and so we don't need to bother with any merging. (The merging probably + // wouldn't even make sense because the coordinate spaces might be different + // in the face of async scrolling). This behaviour of forcing a + // WebRenderLayerScrollData item to be generated when the ASR changes is + // implemented in // WebRenderCommandBuilder::CreateWebRenderCommandsFromDisplayList. 
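(Editorial illustration, not part of the patch: a minimal, self-contained C++ sketch of the clear-before-recursing / restore-after-recursing pattern that the new ClearDeferredTransformItem / RestoreDeferredTransformItem helpers support. All type and function names below are simplified stand-ins, not the real Gecko classes.)

    // Minimal sketch: a deferred transform is emitted on exactly one layer.
    // Clearing before recursing keeps descendants from emitting it again;
    // restoring afterwards keeps it available for later siblings.
    #include <cstdio>
    #include <vector>

    struct DisplayTransform { const char* name; };

    struct StackingContext {
      // Stand-in for mDeferredTransformItem; mutable so const helpers can adjust it.
      mutable const DisplayTransform* deferred = nullptr;
      void ClearDeferredTransformItem() const { deferred = nullptr; }
      void RestoreDeferredTransformItem(const DisplayTransform* aItem) const {
        deferred = aItem;
      }
    };

    struct Layer { const DisplayTransform* emitted; };

    void BuildLayers(const StackingContext& aSc, int aDepth, std::vector<Layer>& aOut) {
      const DisplayTransform* toEmit = aSc.deferred;  // emit on this layer, if any
      aOut.push_back({toEmit});
      if (aDepth < 2) {
        if (toEmit) {
          aSc.ClearDeferredTransformItem();  // descendants must not emit it again
        }
        BuildLayers(aSc, aDepth + 1, aOut);
        if (toEmit) {
          aSc.RestoreDeferredTransformItem(toEmit);  // later siblings may still need it
        }
      }
    }

    int main() {
      DisplayTransform transform{"deferred-transform"};
      StackingContext sc;
      sc.deferred = &transform;
      std::vector<Layer> layers;
      BuildLayers(sc, 0, layers);
      for (size_t i = 0; i < layers.size(); ++i) {
        std::printf("layer %zu: %s\n", i,
                    layers[i].emitted ? layers[i].emitted->name : "(none)");
      }
      return 0;
    }

In this sketch only the outermost layer reports the transform; the recursion below it sees a cleared item, which is the invariant the WebRenderCommandBuilder changes in this patch rely on.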
- nsDisplayTransform* mDeferredTransformItem; + mutable nsDisplayTransform* mDeferredTransformItem; Maybe<gfx::Matrix4x4> mDeferredAncestorTransform; bool mRasterizeLocally; diff --git a/gfx/layers/wr/WebRenderBridgeParent.cpp b/gfx/layers/wr/WebRenderBridgeParent.cpp index 83139b6af6..68e0c7af8c 100644 --- a/gfx/layers/wr/WebRenderBridgeParent.cpp +++ b/gfx/layers/wr/WebRenderBridgeParent.cpp @@ -1919,6 +1919,10 @@ mozilla::ipc::IPCResult WebRenderBridgeParent::RecvClearCachedResources() { wr::AsUint64(mPipelineId), wr::AsUint64(mApi->GetId()), IsRootWebRenderBridgeParent()); + if (!IsRootWebRenderBridgeParent()) { + mApi->FlushPendingWrTransactionEventsWithoutWait(); + } + // Clear resources wr::TransactionBuilder txn(mApi); txn.SetLowPriority(true); @@ -2618,17 +2622,17 @@ void WebRenderBridgeParent::ScheduleGenerateFrame(wr::RenderReasons aReasons) { } void WebRenderBridgeParent::FlushRendering(wr::RenderReasons aReasons, - bool aWaitForPresent) { + bool aBlocking) { if (mDestroyed) { return; } - // This gets called during e.g. window resizes, so we need to flush the - // scene (which has the display list at the new window size). - FlushSceneBuilds(); - FlushFrameGeneration(aReasons); - if (aWaitForPresent) { + if (aBlocking) { + FlushSceneBuilds(); + FlushFrameGeneration(aReasons); FlushFramePresentation(); + } else { + ScheduleGenerateFrame(aReasons); } } diff --git a/gfx/layers/wr/WebRenderBridgeParent.h b/gfx/layers/wr/WebRenderBridgeParent.h index d8d80d1047..2c23779fb2 100644 --- a/gfx/layers/wr/WebRenderBridgeParent.h +++ b/gfx/layers/wr/WebRenderBridgeParent.h @@ -244,7 +244,7 @@ class WebRenderBridgeParent final : public PWebRenderBridgeParent, return aFontKey.mNamespace == mIdNamespace; } - void FlushRendering(wr::RenderReasons aReasons, bool aWaitForPresent = true); + void FlushRendering(wr::RenderReasons aReasons, bool aBlocking = true); /** * Schedule generating WebRender frame definitely at next composite timing. diff --git a/gfx/layers/wr/WebRenderCommandBuilder.cpp b/gfx/layers/wr/WebRenderCommandBuilder.cpp index e1bb2e1127..d7c7468f56 100644 --- a/gfx/layers/wr/WebRenderCommandBuilder.cpp +++ b/gfx/layers/wr/WebRenderCommandBuilder.cpp @@ -1882,9 +1882,8 @@ struct NewLayerData { ScrollableLayerGuid::ViewID mDeferredId = ScrollableLayerGuid::NULL_SCROLL_ID; bool mTransformShouldGetOwnLayer = false; - void ComputeDeferredTransformInfo( - const StackingContextHelper& aSc, nsDisplayItem* aItem, - nsDisplayTransform* aLastDeferredTransform) { + void ComputeDeferredTransformInfo(const StackingContextHelper& aSc, + nsDisplayItem* aItem) { // See the comments on StackingContextHelper::mDeferredTransformItem // for an overview of what deferred transforms are. // In the case where we deferred a transform, but have a child display @@ -1900,14 +1899,6 @@ struct NewLayerData { // that we deferred, and a child WebRenderLayerScrollData item that // holds the scroll metadata for the child's ASR. mDeferredItem = aSc.GetDeferredTransformItem(); - // If this deferred transform is already slated to be emitted onto an - // ancestor layer, do not emit it on this layer as well. Note that it's - // sufficient to check the most recently deferred item here, because - // there's only one per stacking context, and we emit it when changing - // stacking contexts. - if (mDeferredItem == aLastDeferredTransform) { - mDeferredItem = nullptr; - } if (mDeferredItem) { // It's possible the transform's ASR is not only an ancestor of // the item's ASR, but an ancestor of stopAtAsr. 
In such cases, @@ -2071,10 +2062,7 @@ void WebRenderCommandBuilder::CreateWebRenderCommandsFromDisplayList( newLayerData->mLayerCountBeforeRecursing = mLayerScrollData.size(); newLayerData->mStopAtAsr = mAsrStack.empty() ? nullptr : mAsrStack.back(); - newLayerData->ComputeDeferredTransformInfo( - aSc, item, - mDeferredTransformStack.empty() ? nullptr - : mDeferredTransformStack.back()); + newLayerData->ComputeDeferredTransformInfo(aSc, item); // Ensure our children's |stopAtAsr| is not be an ancestor of our // |stopAtAsr|, otherwise we could get cyclic scroll metadata @@ -2096,10 +2084,12 @@ void WebRenderCommandBuilder::CreateWebRenderCommandsFromDisplayList( mAsrStack.push_back(stopAtAsrForChildren); // If we're going to emit a deferred transform onto this layer, - // keep track of that so descendant layers know not to emit the - // same deferred transform. + // clear the deferred transform from the StackingContextHelper + // while we are building the subtree of descendant layers. + // This ensures that the deferred transform is not applied in + // duplicate to any of our descendant layers. if (newLayerData->mDeferredItem) { - mDeferredTransformStack.push_back(newLayerData->mDeferredItem); + aSc.ClearDeferredTransformItem(); } } } @@ -2143,8 +2133,7 @@ void WebRenderCommandBuilder::CreateWebRenderCommandsFromDisplayList( mAsrStack.pop_back(); if (newLayerData->mDeferredItem) { - MOZ_ASSERT(!mDeferredTransformStack.empty()); - mDeferredTransformStack.pop_back(); + aSc.RestoreDeferredTransformItem(newLayerData->mDeferredItem); } const ActiveScrolledRoot* stopAtAsr = newLayerData->mStopAtAsr; diff --git a/gfx/layers/wr/WebRenderCommandBuilder.h b/gfx/layers/wr/WebRenderCommandBuilder.h index 68c9a4ce63..8f17a73e3f 100644 --- a/gfx/layers/wr/WebRenderCommandBuilder.h +++ b/gfx/layers/wr/WebRenderCommandBuilder.h @@ -208,9 +208,6 @@ class WebRenderCommandBuilder final { // need this so that WebRenderLayerScrollData items that deeper in the // tree don't duplicate scroll metadata that their ancestors already have. std::vector<const ActiveScrolledRoot*> mAsrStack; - // A similar stack to track the deferred transform that we decided to emit - // most recently. - std::vector<nsDisplayTransform*> mDeferredTransformStack; const ActiveScrolledRoot* mLastAsr; WebRenderUserDataRefTable mWebRenderUserDatas; diff --git a/gfx/layers/wr/WebRenderLayerManager.cpp b/gfx/layers/wr/WebRenderLayerManager.cpp index 66b98a7db3..c29517c696 100644 --- a/gfx/layers/wr/WebRenderLayerManager.cpp +++ b/gfx/layers/wr/WebRenderLayerManager.cpp @@ -96,6 +96,7 @@ bool WebRenderLayerManager::Initialize( } mWrChild = static_cast<WebRenderBridgeChild*>(bridge); + mHasFlushedThisChild = false; TextureFactoryIdentifier textureFactoryIdentifier; wr::MaybeIdNamespace idNamespace; @@ -694,24 +695,30 @@ void WebRenderLayerManager::FlushRendering(wr::RenderReasons aReasons) { } MOZ_ASSERT(mWidget); - // If value of IsResizingNativeWidget() is nothing, we assume that resizing - // might happen. - bool resizing = mWidget && mWidget->IsResizingNativeWidget().valueOr(true); + // If widget bounds size is different from the last flush, consider + // this to be a resize. It's important to use GetClientSize here, + // because that has extra plumbing to support initial display cases + // where the widget doesn't yet have real bounds. 
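(Editorial illustration, not part of the patch: the resize-detection logic added in the lines that follow can be summarized as "remember the widget size seen at the last flush; a mismatch means a resize; force a blocking flush only on the first flush for this child, or on a resize when synchronous repaint is required". A minimal, self-contained C++ sketch follows; FlushPolicy, ShouldFlushSync and IntSize are made-up stand-ins for the real WebRenderLayerManager members.)

    #include <cstdio>

    struct IntSize {
      int width = 0;
      int height = 0;
      bool operator!=(const IntSize& aOther) const {
        return width != aOther.width || height != aOther.height;
      }
    };

    class FlushPolicy {
     public:
      // Returns true when the flush should be synchronous (blocking).
      bool ShouldFlushSync(const IntSize& aWidgetSize, bool aSyncOnResize) {
        bool resizing = aWidgetSize != mLastFlushSize;
        mLastFlushSize = aWidgetSize;
        bool sync = !mHasFlushed || (resizing && aSyncOnResize);
        mHasFlushed = true;
        return sync;
      }

     private:
      IntSize mLastFlushSize;    // stand-in for mFlushWidgetSize
      bool mHasFlushed = false;  // stand-in for mHasFlushedThisChild
    };

    int main() {
      FlushPolicy policy;
      std::printf("%d\n", policy.ShouldFlushSync({800, 600}, true));    // 1: first flush is sync
      std::printf("%d\n", policy.ShouldFlushSync({800, 600}, true));    // 0: same size, async
      std::printf("%d\n", policy.ShouldFlushSync({1024, 768}, true));   // 1: resize, sync repaint required
      std::printf("%d\n", policy.ShouldFlushSync({1280, 720}, false));  // 0: resize, async allowed
      return 0;
    }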
+ LayoutDeviceIntSize widgetSize = mWidget->GetClientSize(); + bool resizing = widgetSize != mFlushWidgetSize; + mFlushWidgetSize = widgetSize; if (resizing) { aReasons = aReasons | wr::RenderReasons::RESIZE; } - // Limit async FlushRendering to !resizing and Win DComp. - // XXX relax the limitation - if (WrBridge()->GetCompositorUseDComp() && !resizing) { - cBridge->SendFlushRenderingAsync(aReasons); - } else if (mWidget->SynchronouslyRepaintOnResize() || - StaticPrefs::layers_force_synchronous_resize()) { + // Check for the conditions where we force a sync flush. The first + // flush for this child should always be sync. Resizes should + // sometimes be sync. Everything else can be async. + if (!mHasFlushedThisChild || + (resizing && (mWidget->SynchronouslyRepaintOnResize() || + StaticPrefs::layers_force_synchronous_resize()))) { cBridge->SendFlushRendering(aReasons); } else { cBridge->SendFlushRenderingAsync(aReasons); } + + mHasFlushedThisChild = true; } void WebRenderLayerManager::WaitOnTransactionProcessed() { diff --git a/gfx/layers/wr/WebRenderLayerManager.h b/gfx/layers/wr/WebRenderLayerManager.h index 31fc9b6678..5ee2cc76a5 100644 --- a/gfx/layers/wr/WebRenderLayerManager.h +++ b/gfx/layers/wr/WebRenderLayerManager.h @@ -226,6 +226,7 @@ class WebRenderLayerManager final : public WindowRenderer { nsIWidget* MOZ_NON_OWNING_REF mWidget; RefPtr<WebRenderBridgeChild> mWrChild; + bool mHasFlushedThisChild; RefPtr<TransactionIdAllocator> mTransactionIdAllocator; TransactionId mLatestTransactionId; @@ -273,6 +274,8 @@ class WebRenderLayerManager final : public WindowRenderer { UniquePtr<wr::DisplayListBuilder> mDLBuilder; ScrollUpdatesMap mPendingScrollUpdates; + + LayoutDeviceIntSize mFlushWidgetSize; }; } // namespace layers diff --git a/gfx/ots/src/gdef.cc b/gfx/ots/src/gdef.cc index 0e01a93845..7e0b7cc301 100644 --- a/gfx/ots/src/gdef.cc +++ b/gfx/ots/src/gdef.cc @@ -176,8 +176,8 @@ bool OpenTypeGDEF::ParseLigCaretListTable(const uint8_t *data, size_t length) { return Error("Can't read device offset for caret value %d " "in glyph %d", j, i); } - uint16_t absolute_offset = lig_glyphs[i] + caret_value_offsets[j] + offset_device; + size_t absolute_offset = lig_glyphs[i] + caret_value_offsets[j] + + offset_device; if (offset_device == 0 || absolute_offset >= length) { return Error("Bad device offset for caret value %d in glyph %d: %d", j, i, offset_device); diff --git a/gfx/src/nsDeviceContext.cpp b/gfx/src/nsDeviceContext.cpp index 28b2c34652..2bb53010a8 100644 --- a/gfx/src/nsDeviceContext.cpp +++ b/gfx/src/nsDeviceContext.cpp @@ -208,6 +208,16 @@ uint16_t nsDeviceContext::GetScreenOrientationAngle() { return screen->GetOrientationAngle(); } +bool nsDeviceContext::GetScreenIsHDR() { + RefPtr<widget::Screen> screen = FindScreen(); + if (!screen) { + auto& screenManager = ScreenManager::GetSingleton(); + screen = screenManager.GetPrimaryScreen(); + MOZ_ASSERT(screen); + } + return screen->GetIsHDR(); +} + nsresult nsDeviceContext::GetDeviceSurfaceDimensions(nscoord& aWidth, nscoord& aHeight) { if (IsPrinterContext()) { diff --git a/gfx/src/nsDeviceContext.h b/gfx/src/nsDeviceContext.h index 5238c1f71d..7bf7a6aee2 100644 --- a/gfx/src/nsDeviceContext.h +++ b/gfx/src/nsDeviceContext.h @@ -150,6 +150,11 @@ class nsDeviceContext final { uint16_t GetScreenOrientationAngle(); /** + * Get the status of HDR support of the associated screen. + */ + bool GetScreenIsHDR(); + + /** * Get the size of the displayable area of the output device * in app units. 
* @param aWidth out parameter for width diff --git a/gfx/thebes/COLRFonts.cpp b/gfx/thebes/COLRFonts.cpp index 6369bf191d..4a451f22d3 100644 --- a/gfx/thebes/COLRFonts.cpp +++ b/gfx/thebes/COLRFonts.cpp @@ -1298,8 +1298,7 @@ struct PaintGlyph { // Core Text's own color font support may step in and ignore the // pattern. So to avoid this, fill the glyph as a path instead. #if XP_MACOSX - RefPtr<Path> path = - aState.mScaledFont->GetPathForGlyphs(buffer, aState.mDrawTarget); + RefPtr<Path> path = GetPathForGlyphs(aState, buffer); aState.mDrawTarget->Fill(path, *fillPattern, aState.mDrawOptions); #else aState.mDrawTarget->FillGlyphs(aState.mScaledFont, buffer, *fillPattern, @@ -1307,8 +1306,7 @@ struct PaintGlyph { #endif return true; } - RefPtr<Path> path = - aState.mScaledFont->GetPathForGlyphs(buffer, aState.mDrawTarget); + RefPtr<Path> path = GetPathForGlyphs(aState, buffer); aState.mDrawTarget->PushClip(path); bool ok = DispatchPaint(aState, aOffset + paintOffset, aBounds); aState.mDrawTarget->PopClip(); @@ -1319,10 +1317,19 @@ struct PaintGlyph { MOZ_ASSERT(format == kFormat); Glyph g{uint16_t(glyphID), Point()}; GlyphBuffer buffer{&g, 1}; - RefPtr<Path> path = - aState.mScaledFont->GetPathForGlyphs(buffer, aState.mDrawTarget); + RefPtr<Path> path = GetPathForGlyphs(aState, buffer); return path->GetFastBounds(); } + + private: + RefPtr<Path> GetPathForGlyphs(const PaintState& aState, + const GlyphBuffer& buffer) const { + if (aState.mDrawTarget->GetBackendType() == BackendType::WEBRENDER_TEXT) { + RefPtr dt = gfxPlatform::ThreadLocalScreenReferenceDrawTarget(); + return aState.mScaledFont->GetPathForGlyphs(buffer, dt); + } + return aState.mScaledFont->GetPathForGlyphs(buffer, aState.mDrawTarget); + } }; struct PaintColrGlyph { diff --git a/gfx/thebes/DeviceManagerDx.cpp b/gfx/thebes/DeviceManagerDx.cpp index ba473e0d1e..7d00e5ac1d 100644 --- a/gfx/thebes/DeviceManagerDx.cpp +++ b/gfx/thebes/DeviceManagerDx.cpp @@ -234,17 +234,47 @@ void DeviceManagerDx::PostUpdateMonitorInfo() { holder->GetCompositorThread()->DelayedDispatch(runnable.forget(), kDelayMS); } +static bool ColorSpaceIsHDR(const DXGI_OUTPUT_DESC1& aDesc) { + // Set isHDR to true if the output has a BT2020 colorspace with EOTF2084 + // gamma curve, this indicates the system is sending an HDR format to + // this monitor. The colorspace returned by DXGI is very vague - we only + // see DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020 for HDR and + // DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709 for SDR modes, even if the + // monitor is using something like YCbCr444 according to Settings + // (System -> Display Settings -> Advanced Display). To get more specific + // info we would need to query the DISPLAYCONFIG values in WinGDI. + // + // Note that we don't check bit depth here, since as of Windows 11 22H2, + // HDR is supported with 8bpc for lower bandwidth, where DWM converts to + // dithered RGB8 rather than RGB10, which doesn't really matter here. + // + // Since RefreshScreens(), the caller of this function, is triggered + // by WM_DISPLAYCHANGE, this will pick up changes to the monitors in + // all the important cases (resolution/color changes by the user). 
+ // + // Further reading: + // https://learn.microsoft.com/en-us/windows/win32/direct3darticles/high-dynamic-range + // https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-displayconfig_sdr_white_level + bool isHDR = (aDesc.ColorSpace == DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020); + + return isHDR; +} + void DeviceManagerDx::UpdateMonitorInfo() { bool systemHdrEnabled = false; + std::set<HMONITOR> hdrMonitors; for (const auto& desc : GetOutputDescs()) { - if (desc.ColorSpace == DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020) { + if (ColorSpaceIsHDR(desc)) { systemHdrEnabled = true; + hdrMonitors.emplace(desc.Monitor); } } + { MutexAutoLock lock(mDeviceLock); mSystemHdrEnabled = Some(systemHdrEnabled); + mHdrMonitors.swap(hdrMonitors); mUpdateMonitorInfoRunnable = nullptr; } } @@ -329,6 +359,42 @@ bool DeviceManagerDx::SystemHDREnabled() { return mSystemHdrEnabled.ref(); } +bool DeviceManagerDx::WindowHDREnabled(HWND aWindow) { + MOZ_ASSERT(aWindow); + + HMONITOR monitor = ::MonitorFromWindow(aWindow, MONITOR_DEFAULTTONEAREST); + return MonitorHDREnabled(monitor); +} + +bool DeviceManagerDx::MonitorHDREnabled(HMONITOR aMonitor) { + if (!aMonitor) { + return false; + } + + bool needInit = false; + + { + MutexAutoLock lock(mDeviceLock); + if (mSystemHdrEnabled.isNothing()) { + needInit = true; + } + } + + if (needInit) { + UpdateMonitorInfo(); + } + + MutexAutoLock lock(mDeviceLock); + MOZ_ASSERT(mSystemHdrEnabled.isSome()); + + auto it = mHdrMonitors.find(aMonitor); + if (it == mHdrMonitors.end()) { + return false; + } + + return true; +} + void DeviceManagerDx::CheckHardwareStretchingSupport(HwStretchingSupport& aRv) { RefPtr<IDXGIAdapter> adapter = GetDXGIAdapter(); @@ -651,9 +717,11 @@ already_AddRefed<IDXGIAdapter1> DeviceManagerDx::GetDXGIAdapter() { } IDXGIAdapter1* DeviceManagerDx::GetDXGIAdapterLocked() { - if (mAdapter) { + if (mAdapter && mFactory && mFactory->IsCurrent()) { return mAdapter; } + mAdapter = nullptr; + mFactory = nullptr; nsModuleHandle dxgiModule(LoadLibrarySystem32(L"dxgi.dll")); decltype(CreateDXGIFactory1)* createDXGIFactory1 = @@ -668,50 +736,32 @@ IDXGIAdapter1* DeviceManagerDx::GetDXGIAdapterLocked() { // Try to use a DXGI 1.1 adapter in order to share resources // across processes. - RefPtr<IDXGIFactory1> factory1; if (StaticPrefs::gfx_direct3d11_enable_debug_layer_AtStartup()) { - RefPtr<IDXGIFactory2> factory2; if (fCreateDXGIFactory2) { auto hr = fCreateDXGIFactory2(DXGI_CREATE_FACTORY_DEBUG, __uuidof(IDXGIFactory2), - getter_AddRefs(factory2)); + getter_AddRefs(mFactory)); MOZ_ALWAYS_TRUE(!FAILED(hr)); } else { NS_WARNING( "fCreateDXGIFactory2 not loaded, cannot create debug IDXGIFactory2."); } - factory1 = factory2; } - if (!factory1) { + if (!mFactory) { HRESULT hr = - createDXGIFactory1(__uuidof(IDXGIFactory1), getter_AddRefs(factory1)); - if (FAILED(hr) || !factory1) { + createDXGIFactory1(__uuidof(IDXGIFactory1), getter_AddRefs(mFactory)); + if (FAILED(hr) || !mFactory) { // This seems to happen with some people running the iZ3D driver. // They won't get acceleration. return nullptr; } } - if (!mDeviceStatus) { - // If we haven't created a device yet, and have no existing device status, - // then this must be the compositor device. Pick the first adapter we can. - if (FAILED(factory1->EnumAdapters1(0, getter_AddRefs(mAdapter)))) { - return nullptr; - } - } else { - // In the UI and GPU process, we clear mDeviceStatus on device reset, so we - // should never reach here. 
Furthermore, the UI process does not create - // devices when using a GPU process. - // - // So, this should only ever get called on the content process or RDD - // process - MOZ_ASSERT(XRE_IsContentProcess() || XRE_IsRDDProcess()); - - // In the child process, we search for the adapter that matches the parent - // process. The first adapter can be mismatched on dual-GPU systems. + if (mDeviceStatus) { + // Match the adapter to our mDeviceStatus, if possible. for (UINT index = 0;; index++) { RefPtr<IDXGIAdapter1> adapter; - if (FAILED(factory1->EnumAdapters1(index, getter_AddRefs(adapter)))) { + if (FAILED(mFactory->EnumAdapters1(index, getter_AddRefs(adapter)))) { break; } @@ -730,7 +780,9 @@ IDXGIAdapter1* DeviceManagerDx::GetDXGIAdapterLocked() { } if (!mAdapter) { - return nullptr; + mDeviceStatus.reset(); + // Pick the first adapter available. + mFactory->EnumAdapters1(0, getter_AddRefs(mAdapter)); } // We leak this module everywhere, we might as well do so here as well. @@ -1045,7 +1097,7 @@ FeatureStatus DeviceManagerDx::CreateContentDevice() { } RefPtr<ID3D11Device> DeviceManagerDx::CreateDecoderDevice( - bool aHardwareWebRender) { + DeviceFlagSet aFlags) { MutexAutoLock lock(mDeviceLock); if (!mDeviceStatus) { @@ -1054,30 +1106,33 @@ RefPtr<ID3D11Device> DeviceManagerDx::CreateDecoderDevice( bool isAMD = mDeviceStatus->adapter().VendorId == 0x1002; bool reuseDevice = false; - if (gfxVars::ReuseDecoderDevice()) { - reuseDevice = true; - } else if (isAMD) { - reuseDevice = true; - gfxCriticalNoteOnce << "Always have to reuse decoder device on AMD"; - } + if (!aFlags.contains(DeviceFlag::disableDeviceReuse)) { + if (gfxVars::ReuseDecoderDevice()) { + reuseDevice = true; + } else if (isAMD) { + reuseDevice = true; + gfxCriticalNoteOnce << "Always have to reuse decoder device on AMD"; + } - if (reuseDevice) { - // Use mCompositorDevice for decoder device only for hardware WebRender. - if (aHardwareWebRender && mCompositorDevice && - mCompositorDeviceSupportsVideo && !mDecoderDevice) { - mDecoderDevice = mCompositorDevice; - - RefPtr<ID3D10Multithread> multi; - mDecoderDevice->QueryInterface(__uuidof(ID3D10Multithread), - getter_AddRefs(multi)); - if (multi) { - MOZ_ASSERT(multi->GetMultithreadProtected()); + if (reuseDevice) { + // Use mCompositorDevice for decoder device only for hardware WebRender. 
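(Editorial illustration, not part of the patch: the aFlags checks around here use the new DeviceFlag/DeviceFlagSet API that this patch declares in DeviceManagerDx.h. A rough, self-contained C++ sketch of the bool-to-flag-set shape follows; this DeviceFlagSet is a made-up stand-in for mozilla::EnumSet, and CreateDecoderDevice below is only a placeholder for the real method.)

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    enum class DeviceFlag : uint8_t {
      isHardwareWebRenderInUse,
      disableDeviceReuse,
    };

    class DeviceFlagSet {
     public:
      DeviceFlagSet() = default;
      DeviceFlagSet(std::initializer_list<DeviceFlag> aFlags) {
        for (DeviceFlag flag : aFlags) {
          mBits |= uint8_t(1u << uint8_t(flag));
        }
      }
      bool contains(DeviceFlag aFlag) const {
        return (mBits & uint8_t(1u << uint8_t(aFlag))) != 0;
      }

     private:
      uint8_t mBits = 0;
    };

    // Placeholder for the real DeviceManagerDx::CreateDecoderDevice: a flag set
    // keeps the two independent options readable at the call site instead of a
    // single opaque bool parameter.
    void CreateDecoderDevice(DeviceFlagSet aFlags) {
      bool reuseAllowed = !aFlags.contains(DeviceFlag::disableDeviceReuse);
      bool hardwareWebRender = aFlags.contains(DeviceFlag::isHardwareWebRenderInUse);
      std::printf("reuseAllowed=%d hardwareWebRender=%d\n", reuseAllowed, hardwareWebRender);
    }

    int main() {
      CreateDecoderDevice({DeviceFlag::isHardwareWebRenderInUse});
      CreateDecoderDevice({DeviceFlag::disableDeviceReuse});
      return 0;
    }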
+ if (aFlags.contains(DeviceFlag::isHardwareWebRenderInUse) && + mCompositorDevice && mCompositorDeviceSupportsVideo && + !mDecoderDevice) { + mDecoderDevice = mCompositorDevice; + + RefPtr<ID3D10Multithread> multi; + mDecoderDevice->QueryInterface(__uuidof(ID3D10Multithread), + getter_AddRefs(multi)); + if (multi) { + MOZ_ASSERT(multi->GetMultithreadProtected()); + } } - } - if (mDecoderDevice) { - RefPtr<ID3D11Device> dev = mDecoderDevice; - return dev.forget(); + if (mDecoderDevice) { + RefPtr<ID3D11Device> dev = mDecoderDevice; + return dev.forget(); + } } } diff --git a/gfx/thebes/DeviceManagerDx.h b/gfx/thebes/DeviceManagerDx.h index c6860c7ffa..1e4182ddec 100644 --- a/gfx/thebes/DeviceManagerDx.h +++ b/gfx/thebes/DeviceManagerDx.h @@ -6,6 +6,7 @@ #ifndef mozilla_gfx_thebes_DeviceManagerDx_h #define mozilla_gfx_thebes_DeviceManagerDx_h +#include <set> #include <vector> #include "gfxPlatform.h" @@ -58,13 +59,18 @@ class DeviceManagerDx final { static DeviceManagerDx* Get() { return sInstance; } + enum class DeviceFlag { + isHardwareWebRenderInUse, + disableDeviceReuse, + }; + using DeviceFlagSet = EnumSet<DeviceFlag, uint8_t>; RefPtr<ID3D11Device> GetCompositorDevice(); RefPtr<ID3D11Device> GetContentDevice(); RefPtr<ID3D11Device> GetCanvasDevice(); RefPtr<ID3D11Device> GetImageDevice(); RefPtr<IDCompositionDevice2> GetDirectCompositionDevice(); RefPtr<ID3D11Device> GetVRDevice(); - RefPtr<ID3D11Device> CreateDecoderDevice(bool aHardwareWebRender); + RefPtr<ID3D11Device> CreateDecoderDevice(DeviceFlagSet aFlags); RefPtr<ID3D11Device> CreateMediaEngineDevice(); IDirectDraw7* GetDirectDraw(); @@ -91,7 +97,10 @@ class DeviceManagerDx final { bool GetOutputFromMonitor(HMONITOR monitor, RefPtr<IDXGIOutput>* aOutOutput); void PostUpdateMonitorInfo(); + void UpdateMonitorInfo(); bool SystemHDREnabled(); + bool WindowHDREnabled(HWND aWindow); + bool MonitorHDREnabled(HMONITOR aMonitor); // Check if the current adapter supports hardware stretching void CheckHardwareStretchingSupport(HwStretchingSupport& aRv); @@ -177,7 +186,6 @@ class DeviceManagerDx final { bool GetAnyDeviceRemovedReason(DeviceResetReason* aOutReason) MOZ_REQUIRES(mDeviceLock); - void UpdateMonitorInfo(); std::vector<DXGI_OUTPUT_DESC1> GetOutputDescs(); private: @@ -193,6 +201,7 @@ class DeviceManagerDx final { mutable mozilla::Mutex mDeviceLock; nsTArray<D3D_FEATURE_LEVEL> mFeatureLevels MOZ_GUARDED_BY(mDeviceLock); RefPtr<IDXGIAdapter1> mAdapter MOZ_GUARDED_BY(mDeviceLock); + RefPtr<IDXGIFactory1> mFactory MOZ_GUARDED_BY(mDeviceLock); RefPtr<ID3D11Device> mCompositorDevice MOZ_GUARDED_BY(mDeviceLock); RefPtr<ID3D11Device> mContentDevice MOZ_GUARDED_BY(mDeviceLock); RefPtr<ID3D11Device> mCanvasDevice MOZ_GUARDED_BY(mDeviceLock); @@ -208,6 +217,7 @@ class DeviceManagerDx final { Maybe<DeviceResetReason> mDeviceResetReason MOZ_GUARDED_BY(mDeviceLock); RefPtr<Runnable> mUpdateMonitorInfoRunnable MOZ_GUARDED_BY(mDeviceLock); Maybe<bool> mSystemHdrEnabled MOZ_GUARDED_BY(mDeviceLock); + std::set<HMONITOR> mHdrMonitors MOZ_GUARDED_BY(mDeviceLock); nsModuleHandle mDirectDrawDLL; RefPtr<IDirectDraw7> mDirectDraw; diff --git a/gfx/thebes/gfxAndroidPlatform.cpp b/gfx/thebes/gfxAndroidPlatform.cpp index a121d5550a..fbc3d3fa80 100644 --- a/gfx/thebes/gfxAndroidPlatform.cpp +++ b/gfx/thebes/gfxAndroidPlatform.cpp @@ -89,8 +89,12 @@ gfxAndroidPlatform::gfxAndroidPlatform() { RegisterStrongMemoryReporter(new FreetypeReporter()); - mOffscreenFormat = GetScreenDepth() == 16 ? 
SurfaceFormat::R5G6B5_UINT16 - : SurfaceFormat::X8R8G8B8_UINT32; + // Bug 1886573: At this point, we don't yet have primary screen depth. + // This setting of screen depth to 0 is preserving existing behavior, + // and should be fixed. + int32_t screenDepth = 0; + mOffscreenFormat = screenDepth == 16 ? SurfaceFormat::R5G6B5_UINT16 + : SurfaceFormat::X8R8G8B8_UINT32; if (StaticPrefs::gfx_android_rgb16_force_AtStartup()) { mOffscreenFormat = SurfaceFormat::R5G6B5_UINT16; diff --git a/gfx/thebes/gfxFont.cpp b/gfx/thebes/gfxFont.cpp index 618eb49455..c17e786d4f 100644 --- a/gfx/thebes/gfxFont.cpp +++ b/gfx/thebes/gfxFont.cpp @@ -729,15 +729,6 @@ void gfxShapedText::SetupClusterBoundaries(uint32_t aOffset, // preceding letter by any letter-spacing or justification. const char16_t kBengaliVirama = 0x09CD; const char16_t kBengaliYa = 0x09AF; - // Characters treated as hyphens for the purpose of "emergency" breaking - // when the content would otherwise overflow. - auto isHyphen = [](char16_t c) { - return c == char16_t('-') || // HYPHEN-MINUS - c == 0x2010 || // HYPHEN - c == 0x2012 || // FIGURE DASH - c == 0x2013 || // EN DASH - c == 0x058A; // ARMENIAN HYPHEN - }; bool prevWasHyphen = false; while (pos < aLength) { const char16_t ch = aString[pos]; @@ -750,7 +741,7 @@ void gfxShapedText::SetupClusterBoundaries(uint32_t aOffset, } if (ch == char16_t(' ') || ch == kIdeographicSpace) { glyphs[pos].SetIsSpace(); - } else if (isHyphen(ch) && pos && + } else if (nsContentUtils::IsHyphen(ch) && pos && nsContentUtils::IsAlphanumeric(aString[pos - 1])) { prevWasHyphen = true; } else if (ch == kBengaliYa) { @@ -1006,6 +997,10 @@ gfxFont::gfxFont(const RefPtr<UnscaledFont>& aUnscaledFont, } mKerningSet = HasFeatureSet(HB_TAG('k', 'e', 'r', 'n'), mKerningEnabled); + + // Ensure the gfxFontEntry's unitsPerEm and extents fields are initialized, + // so that GetFontExtents can use them without risk of races. + Unused << mFontEntry->UnitsPerEm(); } gfxFont::~gfxFont() { diff --git a/gfx/thebes/gfxFontEntry.cpp b/gfx/thebes/gfxFontEntry.cpp index a9fe04125c..7ff5f82a85 100644 --- a/gfx/thebes/gfxFontEntry.cpp +++ b/gfx/thebes/gfxFontEntry.cpp @@ -259,14 +259,22 @@ already_AddRefed<gfxFont> gfxFontEntry::FindOrMakeFont( } uint16_t gfxFontEntry::UnitsPerEm() { + { + AutoReadLock lock(mLock); + if (mUnitsPerEm) { + return mUnitsPerEm; + } + } + + AutoTable headTable(this, TRUETYPE_TAG('h', 'e', 'a', 'd')); + AutoWriteLock lock(mLock); + if (!mUnitsPerEm) { - AutoTable headTable(this, TRUETYPE_TAG('h', 'e', 'a', 'd')); if (headTable) { uint32_t len; const HeadTable* head = reinterpret_cast<const HeadTable*>(hb_blob_get_data(headTable, &len)); if (len >= sizeof(HeadTable)) { - mUnitsPerEm = head->unitsPerEm; if (int16_t(head->xMax) > int16_t(head->xMin) && int16_t(head->yMax) > int16_t(head->yMin)) { mXMin = head->xMin; @@ -274,6 +282,7 @@ uint16_t gfxFontEntry::UnitsPerEm() { mXMax = head->xMax; mYMax = head->yMax; } + mUnitsPerEm = head->unitsPerEm; } } @@ -283,12 +292,13 @@ uint16_t gfxFontEntry::UnitsPerEm() { mUnitsPerEm = kInvalidUPEM; } } + return mUnitsPerEm; } bool gfxFontEntry::HasSVGGlyph(uint32_t aGlyphId) { - NS_ASSERTION(mSVGInitialized, - "SVG data has not yet been loaded. TryGetSVGData() first."); + MOZ_ASSERT(mSVGInitialized, + "SVG data has not yet been loaded. 
TryGetSVGData() first."); return GetSVGGlyphs()->HasSVGGlyph(aGlyphId); } @@ -306,8 +316,8 @@ bool gfxFontEntry::GetSVGGlyphExtents(DrawTarget* aDrawTarget, void gfxFontEntry::RenderSVGGlyph(gfxContext* aContext, uint32_t aGlyphId, SVGContextPaint* aContextPaint) { - NS_ASSERTION(mSVGInitialized, - "SVG data has not yet been loaded. TryGetSVGData() first."); + MOZ_ASSERT(mSVGInitialized, + "SVG data has not yet been loaded. TryGetSVGData() first."); GetSVGGlyphs()->RenderGlyph(aContext, aGlyphId, aContextPaint); } @@ -464,8 +474,9 @@ hb_blob_t* gfxFontEntry::FontTableHashEntry::ShareTableAndGetBlob( HB_MEMORY_MODE_READONLY, mSharedBlobData, DeleteFontTableBlobData); if (mBlob == hb_blob_get_empty()) { // The FontTableBlobData was destroyed during hb_blob_create(). - // The (empty) blob is still be held in the hashtable with a strong + // The (empty) blob will still be held in the hashtable with a strong // reference. + mSharedBlobData = nullptr; return hb_blob_reference(mBlob); } diff --git a/gfx/thebes/gfxFontEntry.h b/gfx/thebes/gfxFontEntry.h index 888b85f2c9..364cf6c79e 100644 --- a/gfx/thebes/gfxFontEntry.h +++ b/gfx/thebes/gfxFontEntry.h @@ -538,6 +538,9 @@ class gfxFontEntry { mozilla::gfx::Rect GetFontExtents(float aFUnitScaleFactor) const { // Flip the y-axis here to match the orientation of Gecko's coordinates. + // We don't need to take a lock here because the min/max fields are inert + // after initialization, and we make sure to initialize them at gfxFont- + // creation time. return mozilla::gfx::Rect(float(mXMin) * aFUnitScaleFactor, float(-mYMax) * aFUnitScaleFactor, float(mXMax - mXMin) * aFUnitScaleFactor, diff --git a/gfx/thebes/gfxPlatform.cpp b/gfx/thebes/gfxPlatform.cpp index c57564b054..ccc58213ef 100644 --- a/gfx/thebes/gfxPlatform.cpp +++ b/gfx/thebes/gfxPlatform.cpp @@ -445,8 +445,7 @@ gfxPlatform::gfxPlatform() mDisplayInfoCollector(this, &gfxPlatform::GetDisplayInfo), mOverlayInfoCollector(this, &gfxPlatform::GetOverlayInfo), mSwapChainInfoCollector(this, &gfxPlatform::GetSwapChainInfo), - mCompositorBackend(layers::LayersBackend::LAYERS_NONE), - mScreenDepth(0) { + mCompositorBackend(layers::LayersBackend::LAYERS_NONE) { mAllowDownloadableFonts = UNINITIALIZED_VALUE; InitBackendPrefs(GetBackendPrefs()); @@ -1064,6 +1063,13 @@ void gfxPlatform::ReportTelemetry() { mozilla::glean::gfx_display::count.Set(screenCount); mozilla::glean::gfx_display::primary_height.Set(rect.Height()); mozilla::glean::gfx_display::primary_width.Set(rect.Width()); + + // Check if any screen known by screenManager supports HDR. + bool supportsHDR = false; + for (const auto& screen : screenManager.CurrentScreenList()) { + supportsHDR |= screen->GetIsHDR(); + } + Telemetry::ScalarSet(Telemetry::ScalarID::GFX_SUPPORTS_HDR, supportsHDR); } nsString adapterDesc; @@ -1120,10 +1126,6 @@ void gfxPlatform::ReportTelemetry() { NS_ConvertUTF16toUTF8(adapterDriverDate)); mozilla::glean::gfx_status::headless.Set(IsHeadless()); - - MOZ_ASSERT(gPlatform, "Need gPlatform to generate some telemetry."); - Telemetry::ScalarSet(Telemetry::ScalarID::GFX_SUPPORTS_HDR, - gPlatform->SupportsHDR()); } static bool IsFeatureSupported(long aFeature, bool aDefault) { @@ -1607,6 +1609,12 @@ already_AddRefed<DataSourceSurface> gfxPlatform::GetWrappedDataSourceSurface( } void gfxPlatform::PopulateScreenInfo() { + // We're only going to set some gfxVars here, which is only possible from + // the parent process. 
+ if (!XRE_IsParentProcess()) { + return; + } + nsCOMPtr<nsIScreenManager> manager = do_GetService("@mozilla.org/gfx/screenmanager;1"); MOZ_ASSERT(manager, "failed to get nsIScreenManager"); @@ -1618,13 +1626,9 @@ void gfxPlatform::PopulateScreenInfo() { return; } - screen->GetColorDepth(&mScreenDepth); - if (XRE_IsParentProcess()) { - gfxVars::SetScreenDepth(mScreenDepth); - } - - int left, top; - screen->GetRect(&left, &top, &mScreenSize.width, &mScreenSize.height); + int32_t screenDepth; + screen->GetColorDepth(&screenDepth); + gfxVars::SetPrimaryScreenDepth(screenDepth); } bool gfxPlatform::SupportsAzureContentForDrawTarget(DrawTarget* aTarget) { @@ -3158,6 +3162,13 @@ void gfxPlatform::InitWebGPUConfig() { #endif gfxVars::SetAllowWebGPU(feature.IsEnabled()); + +#if XP_WIN + if (IsWin10CreatorsUpdateOrLater() && + StaticPrefs::dom_webgpu_allow_present_without_readback()) { + gfxVars::SetAllowWebGPUPresentWithoutReadback(true); + } +#endif } #ifdef XP_WIN @@ -3707,8 +3718,7 @@ uint32_t gfxPlatform::TargetFrameRate() { /* static */ bool gfxPlatform::UseDesktopZoomingScrollbars() { - return StaticPrefs::apz_allow_zooming() && - !StaticPrefs::apz_force_disable_desktop_zooming_scrollbars(); + return StaticPrefs::apz_allow_zooming(); } /*static*/ diff --git a/gfx/thebes/gfxPlatform.h b/gfx/thebes/gfxPlatform.h index 5af6d77345..452e7208b4 100644 --- a/gfx/thebes/gfxPlatform.h +++ b/gfx/thebes/gfxPlatform.h @@ -646,9 +646,6 @@ class gfxPlatform : public mozilla::layers::MemoryPressureListener { */ static mozilla::LogModule* GetLog(eGfxLog aWhichLog); - int GetScreenDepth() const { return mScreenDepth; } - mozilla::gfx::IntSize GetScreenSize() const { return mScreenSize; } - static void PurgeSkiaFontCache(); static bool UsesOffMainThreadCompositing(); @@ -811,8 +808,6 @@ class gfxPlatform : public mozilla::layers::MemoryPressureListener { static bool UseDesktopZoomingScrollbars(); - virtual bool SupportsHDR() { return false; } - protected: gfxPlatform(); virtual ~gfxPlatform(); @@ -982,7 +977,7 @@ class gfxPlatform : public mozilla::layers::MemoryPressureListener { static void ShutdownCMS(); /** - * This uses nsIScreenManager to determine the screen size and color depth + * This uses nsIScreenManager to determine the primary screen color depth */ void PopulateScreenInfo(); @@ -1027,9 +1022,6 @@ class gfxPlatform : public mozilla::layers::MemoryPressureListener { // created yet. 
mozilla::layers::LayersBackend mCompositorBackend; - int32_t mScreenDepth; - mozilla::gfx::IntSize mScreenSize; - mozilla::Maybe<mozilla::layers::OverlayInfo> mOverlayInfo; mozilla::Maybe<mozilla::layers::SwapChainInfo> mSwapChainInfo; diff --git a/gfx/thebes/gfxPlatformFontList.cpp b/gfx/thebes/gfxPlatformFontList.cpp index 404d291e03..6c1d641509 100644 --- a/gfx/thebes/gfxPlatformFontList.cpp +++ b/gfx/thebes/gfxPlatformFontList.cpp @@ -435,12 +435,6 @@ void gfxPlatformFontList::ApplyWhitelist() { AutoTArray<RefPtr<gfxFontFamily>, 128> accepted; bool whitelistedFontFound = false; for (const auto& entry : mFontFamilies) { - if (entry.GetData()->IsHidden()) { - // Hidden system fonts are exempt from whitelisting, but don't count - // towards determining whether we "kept" any (user-visible) fonts - accepted.AppendElement(entry.GetData()); - continue; - } nsAutoCString fontFamilyName(entry.GetKey()); ToLowerCase(fontFamilyName); if (familyNamesWhitelist.Contains(fontFamilyName)) { @@ -476,8 +470,7 @@ void gfxPlatformFontList::ApplyWhitelist( AutoTArray<fontlist::Family::InitData, 128> accepted; bool keptNonHidden = false; for (auto& f : aFamilies) { - if (f.mVisibility == FontVisibility::Hidden || - familyNamesWhitelist.Contains(f.mKey)) { + if (familyNamesWhitelist.Contains(f.mKey)) { accepted.AppendElement(f); if (f.mVisibility != FontVisibility::Hidden) { keptNonHidden = true; diff --git a/gfx/thebes/gfxPlatformFontList.h b/gfx/thebes/gfxPlatformFontList.h index 3ab74c5f74..5a53d8b9ca 100644 --- a/gfx/thebes/gfxPlatformFontList.h +++ b/gfx/thebes/gfxPlatformFontList.h @@ -124,7 +124,7 @@ class ShmemCharMapHashEntry final : public PLDHashEntryHdr { return aCharMap->GetChecksum(); } - enum { ALLOW_MEMMOVE = true }; + enum { ALLOW_MEMMOVE = false }; // because of the Pointer member private: // charMaps are stored in the shared memory that FontList objects point to, diff --git a/gfx/thebes/gfxPlatformMac.cpp b/gfx/thebes/gfxPlatformMac.cpp index 091b0dff28..5c99d389c8 100644 --- a/gfx/thebes/gfxPlatformMac.cpp +++ b/gfx/thebes/gfxPlatformMac.cpp @@ -965,27 +965,6 @@ gfxPlatformMac::CreateGlobalHardwareVsyncSource() { #endif } -bool gfxPlatformMac::SupportsHDR() { - // HDR has 3 requirements: - // 1) high peak brightness - // 2) high contrast ratio - // 3) color depth > 24 - if (GetScreenDepth() <= 24) { - return false; - } - -#ifdef MOZ_WIDGET_UIKIT - return false; -#elif defined(EARLY_BETA_OR_EARLIER) - // Screen is capable. Is the OS capable? - // More-or-less supported in Catalina. - return true; -#else - // Definitely supported in Big Sur. - return nsCocoaFeatures::OnBigSurOrLater(); -#endif -} - nsTArray<uint8_t> gfxPlatformMac::GetPlatformCMSOutputProfileData() { nsTArray<uint8_t> prefProfileData = GetPrefCMSOutputProfileData(); if (!prefProfileData.IsEmpty()) { diff --git a/gfx/thebes/gfxPlatformMac.h b/gfx/thebes/gfxPlatformMac.h index 29bbd7e877..00ab2c1ca8 100644 --- a/gfx/thebes/gfxPlatformMac.h +++ b/gfx/thebes/gfxPlatformMac.h @@ -76,8 +76,6 @@ class gfxPlatformMac : public gfxPlatform { static bool CheckVariationFontSupport(); - bool SupportsHDR() override; - protected: bool AccelerateLayersByDefault() override; diff --git a/gfx/thebes/gfxTextRun.h b/gfx/thebes/gfxTextRun.h index 770370c9b1..61cdd3b251 100644 --- a/gfx/thebes/gfxTextRun.h +++ b/gfx/thebes/gfxTextRun.h @@ -122,7 +122,7 @@ class gfxTextRun : public gfxShapedText { } // Returns a gfxShapedText::CompressedGlyph::FLAG_BREAK_TYPE_* value - // as defined in gfxFont.h (may be NONE, NORMAL or HYPHEN). 
+ // as defined in gfxFont.h (may be NONE, NORMAL, HYPHEN or EMERGENCY_WRAP). uint8_t CanBreakBefore(uint32_t aPos) const { MOZ_ASSERT(aPos < GetLength()); return mCharacterGlyphs[aPos].CanBreakBefore(); diff --git a/gfx/thebes/gfxUserFontSet.cpp b/gfx/thebes/gfxUserFontSet.cpp index e9a2f513b3..c761bd9227 100644 --- a/gfx/thebes/gfxUserFontSet.cpp +++ b/gfx/thebes/gfxUserFontSet.cpp @@ -20,6 +20,7 @@ #include "mozilla/PostTraversalTask.h" #include "mozilla/dom/WorkerCommon.h" #include "gfxOTSUtils.h" +#include "nsFontFaceLoader.h" #include "nsIFontLoadCompleteCallback.h" #include "nsProxyRelease.h" #include "nsContentUtils.h" @@ -392,6 +393,12 @@ void gfxUserFontEntry::LoadNextSrc() { } void gfxUserFontEntry::ContinueLoad() { + if (mUserFontLoadState == STATUS_NOT_LOADED) { + // We must have been cancelled (possibly due to a font-list refresh) while + // the runnable was pending, so just bail out. + return; + } + MOZ_ASSERT(mUserFontLoadState == STATUS_LOAD_PENDING); MOZ_ASSERT(mSrcList[mCurrentSrcIndex].mSourceType == gfxFontFaceSrc::eSourceType_URL); @@ -974,7 +981,8 @@ gfxUserFontSet::gfxUserFontSet() mLocalRulesUsed(false), mRebuildLocalRules(false), mDownloadCount(0), - mDownloadSize(0) { + mDownloadSize(0), + mMutex("gfxUserFontSet") { IncrementGeneration(true); } @@ -1057,7 +1065,7 @@ void gfxUserFontSet::AddUserFontEntry(const nsCString& aFamilyName, } } -void gfxUserFontSet::IncrementGeneration(bool aIsRebuild) { +void gfxUserFontSet::IncrementGenerationLocked(bool aIsRebuild) { // add one, increment again if zero do { mGeneration = ++sFontSetGeneration; @@ -1097,6 +1105,10 @@ void gfxUserFontSet::ForgetLocalFaces() { } void gfxUserFontSet::ForgetLocalFace(gfxUserFontFamily* aFontFamily) { + // Entries for which we might need to cancel a current loader. + AutoTArray<RefPtr<gfxUserFontEntry>, 8> entriesToCancel; + + // Lock the font family while we iterate over its entries. aFontFamily->ReadLock(); const auto& fonts = aFontFamily->GetFontList(); for (const auto& f : fonts) { @@ -1107,14 +1119,28 @@ void gfxUserFontSet::ForgetLocalFace(gfxUserFontFamily* aFontFamily) { ufe->GetPlatformFontEntry()->IsLocalUserFont()) { ufe->mPlatformFontEntry = nullptr; } - // We need to re-evaluate the source list in the context of the new - // platform fontlist, whether or not the entry actually used a local() - // source last time, as one might be newly available. + // If the entry had a local source, we need to re-evaluate the source list + // in the context of the new platform fontlist, whether or not the entry + // actually used a local() source last time, as one might have been added. if (ufe->mSeenLocalSource) { - ufe->LoadCanceled(); + entriesToCancel.AppendElement(ufe); } } aFontFamily->ReadUnlock(); + + // Cancel any current loaders and reset the state of the affected entries. + for (auto& ufe : entriesToCancel) { + if (auto* loader = ufe->GetLoader()) { + // If there's a loader, we need to cancel it, because we'll trigger a + // fresh load if required when we re-resolve the font... + loader->Cancel(); + RemoveLoader(loader); + } else { + // ...otherwise, just reset our state so that we'll re-evaluate the + // source list from the beginning. 
+ ufe->LoadCanceled(); + } + } } /////////////////////////////////////////////////////////////////////////////// @@ -1209,7 +1235,7 @@ void gfxUserFontSet::UserFontCache::CacheFont(gfxFontEntry* aFontEntry) { "caching a font associated with no family yet"); // if caching is disabled, simply return - if (Preferences::GetBool("gfx.downloadable_fonts.disable_cache")) { + if (StaticPrefs::gfx_downloadable_fonts_disable_cache()) { return; } @@ -1282,8 +1308,7 @@ void gfxUserFontSet::UserFontCache::ForgetFont(gfxFontEntry* aFontEntry) { gfxFontEntry* gfxUserFontSet::UserFontCache::GetFont( const gfxFontFaceSrc& aSrc, const gfxUserFontEntry& aUserFontEntry) { - if (!sUserFonts || - Preferences::GetBool("gfx.downloadable_fonts.disable_cache")) { + if (!sUserFonts || StaticPrefs::gfx_downloadable_fonts_disable_cache()) { return nullptr; } diff --git a/gfx/thebes/gfxUserFontSet.h b/gfx/thebes/gfxUserFontSet.h index d67d10fbe6..58cd2a077f 100644 --- a/gfx/thebes/gfxUserFontSet.h +++ b/gfx/thebes/gfxUserFontSet.h @@ -11,9 +11,11 @@ #include "gfxFontEntry.h" #include "gfxFontUtils.h" #include "mozilla/AlreadyAddRefed.h" +#include "mozilla/Atomics.h" #include "mozilla/Attributes.h" #include "mozilla/FontPropertyTypes.h" #include "mozilla/MemoryReporting.h" +#include "mozilla/RecursiveMutex.h" #include "mozilla/RefPtr.h" #include "nsCOMPtr.h" #include "nsHashKeys.h" @@ -314,7 +316,11 @@ class gfxUserFontSet { uint64_t GetGeneration() { return mGeneration; } // increment the generation on font load - void IncrementGeneration(bool aIsRebuild = false); + void IncrementGeneration(bool aIsRebuild = false) { + mozilla::RecursiveMutexAutoLock lock(mMutex); + IncrementGenerationLocked(aIsRebuild); + } + void IncrementGenerationLocked(bool aIsRebuild = false) MOZ_REQUIRES(mMutex); // Generation is bumped on font loads but that doesn't affect name-style // mappings. Rebuilds do however affect name-style mappings so need to @@ -520,6 +526,9 @@ class gfxUserFontSet { // helper method for performing the actual userfont set rebuild virtual void DoRebuildUserFontSet() = 0; + // forget about a loader that has been cancelled + virtual void RemoveLoader(nsFontFaceLoader* aLoader) = 0; + // helper method for FindOrCreateUserFontEntry gfxUserFontEntry* FindExistingUserFontEntry( gfxUserFontFamily* aFamily, @@ -548,6 +557,8 @@ class gfxUserFontSet { // performance stats uint32_t mDownloadCount; uint64_t mDownloadSize; + + mutable mozilla::RecursiveMutex mMutex; }; // acts a placeholder until the real font is downloaded diff --git a/gfx/thebes/gfxWindowsPlatform.cpp b/gfx/thebes/gfxWindowsPlatform.cpp index 93de20f445..4554d487f4 100644 --- a/gfx/thebes/gfxWindowsPlatform.cpp +++ b/gfx/thebes/gfxWindowsPlatform.cpp @@ -82,6 +82,7 @@ #include "mozilla/layers/DeviceAttachmentsD3D11.h" #include "mozilla/WindowsProcessMitigations.h" #include "D3D11Checks.h" +#include "mozilla/ScreenHelperWin.h" using namespace mozilla; using namespace mozilla::gfx; @@ -258,8 +259,7 @@ class D3DSharedTexturesReporter final : public nsIMemoryReporter { NS_IMPL_ISUPPORTS(D3DSharedTexturesReporter, nsIMemoryReporter) -gfxWindowsPlatform::gfxWindowsPlatform() - : mRenderMode(RENDER_GDI), mSupportsHDR(false) { +gfxWindowsPlatform::gfxWindowsPlatform() : mRenderMode(RENDER_GDI) { // If win32k is locked down then we can't use COM STA and shouldn't need it. // Also, we won't be using any GPU memory in this process. 
if (!IsWin32kLockedDown()) { @@ -400,7 +400,11 @@ void gfxWindowsPlatform::InitAcceleration() { // CanUseHardwareVideoDecoding depends on DeviceManagerDx state, // so update the cached value now. UpdateCanUseHardwareVideoDecoding(); - UpdateSupportsHDR(); + + // Our ScreenHelperWin also depends on DeviceManagerDx state. + if (XRE_IsParentProcess() && !gfxPlatform::IsHeadless()) { + ScreenHelperWin::RefreshScreens(); + } RecordStartupTelemetry(); } @@ -531,53 +535,6 @@ void gfxWindowsPlatform::UpdateRenderMode() { } } -void gfxWindowsPlatform::UpdateSupportsHDR() { - // TODO: This function crashes content processes, for reasons that are not - // obvious from the crash reports. For now, this function can only be executed - // by the parent process. Therefore SupportsHDR() will always return false for - // content processes, as noted in the header. - if (!XRE_IsParentProcess()) { - return; - } - - // Set mSupportsHDR to true if any of the DeviceManager outputs have a BT2020 - // colorspace with EOTF2084 gamma curve, this indicates the system is sending - // an HDR format to at least one monitor. The colorspace returned by DXGI is - // very vague - we only see DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020 for HDR - // and DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709 for SDR modes, even if the - // monitor is using something like YCbCr444 according to Settings - // (System -> Display Settings -> Advanced Display). To get more specific - // info we would need to query the DISPLAYCONFIG values in WinGDI. - // - // Note that the bit depth used to be checked here, but as of Windows 11 22H2, - // HDR is supported with 8bpc for lower bandwidth, where DWM converts to - // dithered RGB8 rather than RGB10, which doesn't really matter here. - // - // This only returns true if there is an HDR display connected at app start, - // if the user switches to HDR to watch a video, we won't know that here, and - // if no displays are connected we return false (e.g. if Windows Update - // restarted a laptop with its lid closed and no external displays, we will - // see zero outputs here when the app is restarted automatically). - // - // It would be better to track if HDR is ever used and report that telemetry - // so we know if HDR matters, not just when it is detected at app start. - // - // Further reading: - // https://learn.microsoft.com/en-us/windows/win32/direct3darticles/high-dynamic-range - // https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-displayconfig_sdr_white_level - DeviceManagerDx* dx = DeviceManagerDx::Get(); - nsTArray<DXGI_OUTPUT_DESC1> outputs = dx->EnumerateOutputs(); - - for (auto& output : outputs) { - if (output.ColorSpace == DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020) { - mSupportsHDR = true; - return; - } - } - - mSupportsHDR = false; -} - mozilla::gfx::BackendType gfxWindowsPlatform::GetContentBackendFor( mozilla::layers::LayersBackend aLayers) { mozilla::gfx::BackendType defaultBackend = diff --git a/gfx/thebes/gfxWindowsPlatform.h b/gfx/thebes/gfxWindowsPlatform.h index 0956a384b0..12efaefc40 100644 --- a/gfx/thebes/gfxWindowsPlatform.h +++ b/gfx/thebes/gfxWindowsPlatform.h @@ -193,9 +193,6 @@ class gfxWindowsPlatform final : public gfxPlatform { static bool CheckVariationFontSupport(); - // Always false for content processes. 
- bool SupportsHDR() override { return mSupportsHDR; } - protected: bool AccelerateLayersByDefault() override { return true; } @@ -214,10 +211,7 @@ class gfxWindowsPlatform final : public gfxPlatform { BackendPrefsData GetBackendPrefs() const override; - void UpdateSupportsHDR(); - RenderMode mRenderMode; - bool mSupportsHDR; private: void Init(); diff --git a/gfx/webrender_bindings/DCLayerTree.cpp b/gfx/webrender_bindings/DCLayerTree.cpp index 177979a466..49ca8ee77b 100644 --- a/gfx/webrender_bindings/DCLayerTree.cpp +++ b/gfx/webrender_bindings/DCLayerTree.cpp @@ -70,8 +70,8 @@ UniquePtr<DCLayerTree> DCLayerTree::Create(gl::GLContext* aGL, return nullptr; } - auto layerTree = - MakeUnique<DCLayerTree>(aGL, aEGLConfig, aDevice, aCtx, dCompDevice); + auto layerTree = MakeUnique<DCLayerTree>(aGL, aEGLConfig, aDevice, aCtx, + aHwnd, dCompDevice); if (!layerTree->Initialize(aHwnd, aError)) { return nullptr; } @@ -83,11 +83,12 @@ void DCLayerTree::Shutdown() { DCLayerTree::sGpuOverlayInfo = nullptr; } DCLayerTree::DCLayerTree(gl::GLContext* aGL, EGLConfig aEGLConfig, ID3D11Device* aDevice, ID3D11DeviceContext* aCtx, - IDCompositionDevice2* aCompositionDevice) + HWND aHwnd, IDCompositionDevice2* aCompositionDevice) : mGL(aGL), mEGLConfig(aEGLConfig), mDevice(aDevice), mCtx(aCtx), + mHwnd(aHwnd), mCompositionDevice(aCompositionDevice), mDebugCounter(false), mDebugVisualRedrawRegions(false), @@ -1358,7 +1359,8 @@ bool DCSurfaceVideo::CalculateSwapChainSize(gfx::Matrix& aTransform) { GetVpAutoHDRSupported(vendorId, mDCLayerTree->GetVideoContext(), mDCLayerTree->GetVideoProcessor()); const bool contentIsHDR = false; // XXX for now, only non-HDR is supported. - const bool monitorIsHDR = gfx::DeviceManagerDx::Get()->SystemHDREnabled(); + const bool monitorIsHDR = + gfx::DeviceManagerDx::Get()->WindowHDREnabled(mDCLayerTree->GetHwnd()); const bool powerIsCharging = RenderThread::Get()->GetPowerIsCharging(); bool useVpAutoHDR = gfx::gfxVars::WebRenderOverlayVpAutoHDR() && diff --git a/gfx/webrender_bindings/DCLayerTree.h b/gfx/webrender_bindings/DCLayerTree.h index 6d3a611802..d5ade5781e 100644 --- a/gfx/webrender_bindings/DCLayerTree.h +++ b/gfx/webrender_bindings/DCLayerTree.h @@ -113,7 +113,7 @@ class DCLayerTree { explicit DCLayerTree(gl::GLContext* aGL, EGLConfig aEGLConfig, ID3D11Device* aDevice, ID3D11DeviceContext* aCtx, - IDCompositionDevice2* aCompositionDevice); + HWND aHwnd, IDCompositionDevice2* aCompositionDevice); ~DCLayerTree(); void SetDefaultSwapChain(IDXGISwapChain1* aSwapChain); @@ -159,6 +159,8 @@ class DCLayerTree { DCSurface* GetSurface(wr::NativeSurfaceId aId) const; + HWND GetHwnd() const { return mHwnd; } + // Get or create an FBO with depth buffer suitable for specified dimensions GLuint GetOrCreateFbo(int aWidth, int aHeight); @@ -187,6 +189,7 @@ class DCLayerTree { RefPtr<ID3D11Device> mDevice; RefPtr<ID3D11DeviceContext> mCtx; + HWND mHwnd; RefPtr<IDCompositionDevice2> mCompositionDevice; RefPtr<IDCompositionTarget> mCompositionTarget; diff --git a/gfx/webrender_bindings/src/bindings.rs b/gfx/webrender_bindings/src/bindings.rs index 047791c76b..3fc93fdf13 100644 --- a/gfx/webrender_bindings/src/bindings.rs +++ b/gfx/webrender_bindings/src/bindings.rs @@ -31,15 +31,13 @@ use thin_vec::ThinVec; use euclid::SideOffsets2D; use moz2d_renderer::Moz2dBlobImageHandler; use nsstring::nsAString; -use num_cpus; use program_cache::{remove_disk_cache, WrProgramCache}; -use rayon; use tracy_rs::register_thread_with_profiler; use webrender::sw_compositor::SwCompositor; use webrender::{ 
api::units::*, api::*, create_webrender_instance, render_api::*, set_profiler_hooks, AsyncPropertySampler, AsyncScreenshotHandle, Compositor, CompositorCapabilities, CompositorConfig, CompositorSurfaceTransform, - DebugFlags, Device, MappableCompositor, MappedTileInfo, NativeSurfaceId, NativeSurfaceInfo, NativeTileId, + Device, MappableCompositor, MappedTileInfo, NativeSurfaceId, NativeSurfaceInfo, NativeTileId, PartialPresentCompositor, PipelineInfo, ProfilerHooks, RecordedFrameHandle, Renderer, RendererStats, SWGLCompositeSurfaceInfo, SceneBuilderHooks, ShaderPrecacheFlags, Shaders, SharedShaders, TextureCacheConfig, UploadMethod, WebRenderOptions, WindowVisibility, ONE_TIME_USAGE_HINT, diff --git a/gfx/webrender_bindings/src/moz2d_renderer.rs b/gfx/webrender_bindings/src/moz2d_renderer.rs index ca1e76f96f..10006319bb 100644 --- a/gfx/webrender_bindings/src/moz2d_renderer.rs +++ b/gfx/webrender_bindings/src/moz2d_renderer.rs @@ -19,7 +19,6 @@ use webrender::api::units::{BlobDirtyRect, BlobToDeviceTranslation, DeviceIntRec use webrender::api::*; use euclid::point2; -use std; use std::collections::btree_map::BTreeMap; use std::collections::hash_map; use std::collections::hash_map::HashMap; @@ -30,9 +29,6 @@ use std::os::raw::c_void; use std::ptr; use std::sync::Arc; -#[cfg(target_os = "windows")] -use dwrote; - #[cfg(any(target_os = "macos", target_os = "ios"))] use core_foundation::string::CFString; #[cfg(any(target_os = "macos", target_os = "ios"))] @@ -182,12 +178,6 @@ struct BlobReader<'a> { begin: usize, } -#[derive(PartialEq, Debug, Eq, Clone, Copy)] -struct IntPoint { - x: i32, - y: i32, -} - /// The metadata for each display item in a blob image (doesn't match the serialized layout). /// /// See BlobReader above for detailed docs of the blob image format. diff --git a/gfx/webrender_bindings/src/program_cache.rs b/gfx/webrender_bindings/src/program_cache.rs index 5db61fc8b5..d5ad21654f 100644 --- a/gfx/webrender_bindings/src/program_cache.rs +++ b/gfx/webrender_bindings/src/program_cache.rs @@ -11,8 +11,6 @@ use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::Arc; -use bincode; -use fxhash; use nsstring::nsAString; use rayon::ThreadPool; use webrender::{ProgramBinary, ProgramCache, ProgramCacheObserver, ProgramSourceDigest}; diff --git a/gfx/wgpu_bindings/Cargo.toml b/gfx/wgpu_bindings/Cargo.toml index 21b4b8c7f7..d22253deed 100644 --- a/gfx/wgpu_bindings/Cargo.toml +++ b/gfx/wgpu_bindings/Cargo.toml @@ -17,7 +17,7 @@ default = [] [dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "6040820099bc72b827a6a5f53d66dda3e301f944" +rev = "0c5bebca514eb06d9387f87666c1c658f3f673b4" # TODO: remove the replay feature on the next update containing https://github.com/gfx-rs/wgpu/pull/5182 features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"] @@ -26,37 +26,37 @@ features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info" [target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "6040820099bc72b827a6a5f53d66dda3e301f944" +rev = "0c5bebca514eb06d9387f87666c1c658f3f673b4" features = ["metal"] # We want the wgpu-core Direct3D backends on Windows. 
[target.'cfg(windows)'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "6040820099bc72b827a6a5f53d66dda3e301f944" +rev = "0c5bebca514eb06d9387f87666c1c658f3f673b4" features = ["dx12"] # We want the wgpu-core Vulkan backend on Linux and Windows. [target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "6040820099bc72b827a6a5f53d66dda3e301f944" +rev = "0c5bebca514eb06d9387f87666c1c658f3f673b4" features = ["vulkan"] [dependencies.wgt] package = "wgpu-types" git = "https://github.com/gfx-rs/wgpu" -rev = "6040820099bc72b827a6a5f53d66dda3e301f944" +rev = "0c5bebca514eb06d9387f87666c1c658f3f673b4" [dependencies.wgh] package = "wgpu-hal" git = "https://github.com/gfx-rs/wgpu" -rev = "6040820099bc72b827a6a5f53d66dda3e301f944" +rev = "0c5bebca514eb06d9387f87666c1c658f3f673b4" features = ["windows_rs"] [target.'cfg(windows)'.dependencies.d3d12] git = "https://github.com/gfx-rs/wgpu" -rev = "6040820099bc72b827a6a5f53d66dda3e301f944" +rev = "0c5bebca514eb06d9387f87666c1c658f3f673b4" [target.'cfg(windows)'.dependencies] winapi = "0.3" diff --git a/gfx/wgpu_bindings/moz.yaml b/gfx/wgpu_bindings/moz.yaml index ffa746cb46..2f688461b7 100644 --- a/gfx/wgpu_bindings/moz.yaml +++ b/gfx/wgpu_bindings/moz.yaml @@ -20,11 +20,11 @@ origin: # Human-readable identifier for this version/release # Generally "version NNN", "tag SSS", "bookmark SSS" - release: 6040820099bc72b827a6a5f53d66dda3e301f944 (2024-03-12T14:49:44Z). + release: 0c5bebca514eb06d9387f87666c1c658f3f673b4 (2024-04-02T20:12:28Z). # Revision to pull in # Must be a long or short commit SHA (long preferred) - revision: 6040820099bc72b827a6a5f53d66dda3e301f944 + revision: 0c5bebca514eb06d9387f87666c1c658f3f673b4 license: ['MIT', 'Apache-2.0'] diff --git a/gfx/wgpu_bindings/src/client.rs b/gfx/wgpu_bindings/src/client.rs index c49dbea7a5..ae1a5ef5ea 100644 --- a/gfx/wgpu_bindings/src/client.rs +++ b/gfx/wgpu_bindings/src/client.rs @@ -525,6 +525,20 @@ pub extern "C" fn wgpu_client_make_buffer_id( } #[no_mangle] +pub extern "C" fn wgpu_client_free_buffer_id( + client: &Client, + id: id::BufferId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .buffers + .free(id) +} + +#[no_mangle] pub extern "C" fn wgpu_client_create_texture( client: &Client, device_id: id::DeviceId, @@ -555,6 +569,21 @@ pub extern "C" fn wgpu_client_create_texture( } #[no_mangle] +pub extern "C" fn wgpu_client_free_texture_id( + client: &Client, + id: id::TextureId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .textures + .free(id) +} + + +#[no_mangle] pub extern "C" fn wgpu_client_create_texture_view( client: &Client, device_id: id::DeviceId, @@ -590,6 +619,20 @@ pub extern "C" fn wgpu_client_create_texture_view( } #[no_mangle] +pub extern "C" fn wgpu_client_free_texture_view_id( + client: &Client, + id: id::TextureViewId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .texture_views + .free(id) +} + +#[no_mangle] pub extern "C" fn wgpu_client_create_sampler( client: &Client, device_id: id::DeviceId, @@ -624,6 +667,20 @@ pub extern "C" fn wgpu_client_create_sampler( } #[no_mangle] +pub extern "C" fn wgpu_client_free_sampler_id( + client: &Client, + id: id::SamplerId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .samplers + .free(id) +} + +#[no_mangle] pub extern 
"C" fn wgpu_client_make_encoder_id( client: &Client, device_id: id::DeviceId, @@ -639,6 +696,21 @@ pub extern "C" fn wgpu_client_make_encoder_id( } #[no_mangle] +pub extern "C" fn wgpu_client_free_command_encoder_id( + client: &Client, + id: id::CommandEncoderId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .command_buffers + .free(id.transmute()) +} + + +#[no_mangle] pub extern "C" fn wgpu_client_create_command_encoder( client: &Client, device_id: id::DeviceId, @@ -700,6 +772,7 @@ pub extern "C" fn wgpu_device_create_render_bundle_encoder( } } + #[no_mangle] pub unsafe extern "C" fn wgpu_render_bundle_encoder_destroy( pass: *mut wgc::command::RenderBundleEncoder, @@ -755,6 +828,20 @@ pub unsafe extern "C" fn wgpu_client_create_render_bundle_error( id } +#[no_mangle] +pub extern "C" fn wgpu_client_free_render_bundle_id( + client: &Client, + id: id::RenderBundleId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .render_bundles + .free(id) +} + #[repr(C)] pub struct ComputePassDescriptor<'a> { pub label: Option<&'a nsACString>, @@ -986,6 +1073,20 @@ pub unsafe extern "C" fn wgpu_client_create_bind_group_layout( } #[no_mangle] +pub extern "C" fn wgpu_client_free_bind_group_layout_id( + client: &Client, + id: id::BindGroupLayoutId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .bind_group_layouts + .free(id) +} + +#[no_mangle] pub unsafe extern "C" fn wgpu_client_render_pipeline_get_bind_group_layout( client: &Client, pipeline_id: id::RenderPipelineId, @@ -1059,6 +1160,20 @@ pub unsafe extern "C" fn wgpu_client_create_pipeline_layout( } #[no_mangle] +pub extern "C" fn wgpu_client_free_pipeline_layout_id( + client: &Client, + id: id::PipelineLayoutId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .pipeline_layouts + .free(id) +} + +#[no_mangle] pub unsafe extern "C" fn wgpu_client_create_bind_group( client: &Client, device_id: id::DeviceId, @@ -1106,6 +1221,20 @@ pub unsafe extern "C" fn wgpu_client_create_bind_group( } #[no_mangle] +pub extern "C" fn wgpu_client_free_bind_group_id( + client: &Client, + id: id::BindGroupId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .bind_groups + .free(id) +} + +#[no_mangle] pub extern "C" fn wgpu_client_make_shader_module_id( client: &Client, device_id: id::DeviceId, @@ -1120,6 +1249,20 @@ pub extern "C" fn wgpu_client_make_shader_module_id( } #[no_mangle] +pub extern "C" fn wgpu_client_free_shader_module_id( + client: &Client, + id: id::ShaderModuleId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .shader_modules + .free(id) +} + +#[no_mangle] pub unsafe extern "C" fn wgpu_client_create_compute_pipeline( client: &Client, device_id: id::DeviceId, @@ -1161,6 +1304,20 @@ pub unsafe extern "C" fn wgpu_client_create_compute_pipeline( } #[no_mangle] +pub extern "C" fn wgpu_client_free_compute_pipeline_id( + client: &Client, + id: id::ComputePipelineId, +) { + let backend = id.backend(); + client + .identities + .lock() + .select(backend) + .compute_pipelines + .free(id) +} + +#[no_mangle] pub unsafe extern "C" fn wgpu_client_create_render_pipeline( client: &Client, device_id: id::DeviceId, @@ -1204,6 +1361,20 @@ pub unsafe extern "C" fn wgpu_client_create_render_pipeline( } #[no_mangle] +pub extern "C" fn wgpu_client_free_render_pipeline_id( + client: &Client, + id: id::RenderPipelineId, +) { + let backend = 
id.backend(); + client + .identities + .lock() + .select(backend) + .render_pipelines + .free(id) +} + +#[no_mangle] pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_buffer( src: id::BufferId, src_offset: wgt::BufferAddress, @@ -1346,10 +1517,6 @@ pub extern "C" fn wgpu_client_use_external_texture_in_swapChain( return false; } - if !static_prefs::pref!("dom.webgpu.swap-chain.external-texture-dx12") { - return false; - } - let supported = match format { wgt::TextureFormat::Bgra8Unorm => true, _ => false, diff --git a/gfx/wgpu_bindings/src/server.rs b/gfx/wgpu_bindings/src/server.rs index 8417fe84fb..1cedf35ea5 100644 --- a/gfx/wgpu_bindings/src/server.rs +++ b/gfx/wgpu_bindings/src/server.rs @@ -337,29 +337,38 @@ impl ShaderModuleCompilationMessage { fn set_error(&mut self, error: &CreateShaderModuleError, source: &str) { // The WebGPU spec says that if the message doesn't point to a particular position in // the source, the line number, position, offset and lengths should be zero. - self.line_number = 0; - self.line_pos = 0; - self.utf16_offset = 0; - self.utf16_length = 0; + let line_number; + let line_pos; + let utf16_offset; + let utf16_length; if let Some(location) = error.location(source) { - self.line_number = location.line_number as u64; - self.line_pos = location.line_position as u64; - + let len_utf16 = |s: &str| s.chars().map(|c| c.len_utf16() as u64).sum(); let start = location.offset as usize; let end = start + location.length as usize; - self.utf16_offset = source[0..start].chars().map(|c| c.len_utf16() as u64).sum(); - self.utf16_length = source[start..end] - .chars() - .map(|c| c.len_utf16() as u64) - .sum(); + utf16_offset = len_utf16(&source[0..start]); + utf16_length = len_utf16(&source[start..end]); + + line_number = location.line_number as u64; + // Naga reports a `line_pos` using UTF-8 bytes, so we cannot use it. 
+ let line_start = source[0..start].rfind('\n').map(|pos| pos + 1).unwrap_or(0); + line_pos = len_utf16(&source[line_start..start]) + 1; + } else { + line_number = 0; + line_pos = 0; + utf16_offset = 0; + utf16_length = 0; } - let error_string = error.to_string(); + let message = nsString::from(&error.to_string()); - if !error_string.is_empty() { - self.message = nsString::from(&error_string[..]); - } + *self = Self { + line_number, + line_pos, + utf16_offset, + utf16_length, + message, + }; } } diff --git a/gfx/wr/Cargo.lock b/gfx/wr/Cargo.lock index 2bf28a1b6e..333aa68686 100644 --- a/gfx/wr/Cargo.lock +++ b/gfx/wr/Cargo.lock @@ -347,7 +347,7 @@ dependencies = [ "indexmap", "strsim", "termcolor", - "textwrap", + "textwrap 0.15.0", "yaml-rust", ] @@ -996,9 +996,9 @@ dependencies = [ [[package]] name = "glean" -version = "58.1.0" +version = "59.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58388f10d013e2d12bb58e6e76983ede120789956fe827913a3d2560c66d44d" +checksum = "0ceede8fb9c90ba1b77fb8290d3ae7b62bfcb422ad1d6e46bae1c8af3f22f12d" dependencies = [ "glean-core", "inherent", @@ -1009,9 +1009,9 @@ dependencies = [ [[package]] name = "glean-core" -version = "58.1.0" +version = "59.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9acc46fd38c5c995a0537e76364496addace660839dc279079e5957e3c1093" +checksum = "ea06a592b1395e0a16a5f4d6872f009ca7c98acc5127a8119088f1b435b5aaae" dependencies = [ "android_logger", "bincode", @@ -1189,9 +1189,9 @@ dependencies = [ [[package]] name = "goblin" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572564d6cba7d09775202c8e7eebc4d534d5ae36578ab402fb21e182a0ac9505" +checksum = "bb07a4ffed2093b118a525b1d8f5204ae274faed5604537caf7135d0f18d9887" dependencies = [ "log", "plain", @@ -1873,9 +1873,9 @@ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oneshot-uniffi" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae4988774e7a7e6a0783d119bdc683ea8c1d01a24d4fff9b4bdc280e07bd99e" +checksum = "6c548d5c78976f6955d72d0ced18c48ca07030f7a1d4024529fedd7c1c01b29c" [[package]] name = "ordered-float" @@ -2338,22 +2338,22 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scroll" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" +checksum = "6ab8598aa408498679922eff7fa985c25d58a90771bd6be794434c5277eab1a6" dependencies = [ "scroll_derive", ] [[package]] name = "scroll_derive" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" +checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn 1.0.91", + "syn 2.0.25", ] [[package]] @@ -2452,6 +2452,12 @@ dependencies = [ ] [[package]] +name = "smawk" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" + +[[package]] name = "smithay-client-toolkit" version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2618,6 +2624,17 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] +name = "textwrap" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" +dependencies = [ + "smawk", + "unicode-linebreak", + "unicode-width", +] + +[[package]] name = "thiserror" version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2718,6 +2735,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] +name = "unicode-linebreak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" + +[[package]] name = "unicode-normalization" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2727,6 +2750,12 @@ dependencies = [ ] [[package]] +name = "unicode-width" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" + +[[package]] name = "unicode-xid" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2734,9 +2763,9 @@ checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] name = "uniffi" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21345172d31092fd48c47fd56c53d4ae9e41c4b1f559fb8c38c1ab1685fd919f" +checksum = "a5566fae48a5cb017005bf9cd622af5236b2a203a13fb548afde3506d3c68277" dependencies = [ "anyhow", "uniffi_build", @@ -2746,9 +2775,9 @@ dependencies = [ [[package]] name = "uniffi_bindgen" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd992f2929a053829d5875af1eff2ee3d7a7001cb3b9a46cc7895f2caede6940" +checksum = "4a77bb514bcd4bf27c9bd404d7c3f2a6a8131b957eba9c22cfeb7751c4278e09" dependencies = [ "anyhow", "askama", @@ -2761,6 +2790,7 @@ dependencies = [ "once_cell", "paste", "serde", + "textwrap 0.16.1", "toml", "uniffi_meta", "uniffi_testing", @@ -2769,9 +2799,9 @@ dependencies = [ [[package]] name = "uniffi_build" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001964dd3682d600084b3aaf75acf9c3426699bc27b65e96bb32d175a31c74e9" +checksum = "45cba427aeb7b3a8b54830c4c915079a7a3c62608dd03dddba1d867a8a023eb4" dependencies = [ "anyhow", "camino", @@ -2780,9 +2810,9 @@ dependencies = [ [[package]] name = "uniffi_checksum_derive" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55137c122f712d9330fd985d66fa61bdc381752e89c35708c13ce63049a3002c" +checksum = "ae7e5a6c33b1dec3f255f57ec0b6af0f0b2bb3021868be1d5eec7a38e2905ebc" dependencies = [ "quote", "syn 2.0.25", @@ -2790,9 +2820,9 @@ dependencies = [ [[package]] name = "uniffi_core" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6121a127a3af1665cd90d12dd2b3683c2643c5103281d0fed5838324ca1fad5b" +checksum = "0ea3eb5474d50fc149b7e4d86b9c5bd4a61dcc167f0683902bf18ae7bbb3deef" dependencies = [ "anyhow", "bytes", @@ -2806,9 +2836,9 @@ dependencies = [ [[package]] name = "uniffi_macros" -version = "0.25.3" 
+version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11cf7a58f101fcedafa5b77ea037999b88748607f0ef3a33eaa0efc5392e92e4" +checksum = "18331d35003f46f0d04047fbe4227291815b83a937a8c32bc057f990962182c4" dependencies = [ "bincode", "camino", @@ -2819,15 +2849,14 @@ dependencies = [ "serde", "syn 2.0.25", "toml", - "uniffi_build", "uniffi_meta", ] [[package]] name = "uniffi_meta" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71dc8573a7b1ac4b71643d6da34888273ebfc03440c525121f1b3634ad3417a2" +checksum = "f7224422c4cfd181c7ca9fca2154abca4d21db962f926f270f996edd38b0c4b8" dependencies = [ "anyhow", "bytes", @@ -2837,9 +2866,9 @@ dependencies = [ [[package]] name = "uniffi_testing" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "118448debffcb676ddbe8c5305fb933ab7e0123753e659a71dc4a693f8d9f23c" +checksum = "f8ce878d0bdfc288b58797044eaaedf748526c56eef3575380bb4d4b19d69eee" dependencies = [ "anyhow", "camino", @@ -2850,11 +2879,12 @@ dependencies = [ [[package]] name = "uniffi_udl" -version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889edb7109c6078abe0e53e9b4070cf74a6b3468d141bdf5ef1bd4d1dc24a1c3" +checksum = "8c43c9ed40a8d20a5c3eae2d23031092db6b96dc8e571beb449ba9757484cea0" dependencies = [ "anyhow", + "textwrap 0.16.1", "uniffi_meta", "uniffi_testing", "weedle2", @@ -3141,9 +3171,9 @@ dependencies = [ [[package]] name = "weedle2" -version = "4.0.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e79c5206e1f43a2306fd64bdb95025ee4228960f2e6c5a8b173f3caaf807741" +checksum = "998d2c24ec099a87daf9467808859f9d82b61f1d9c9701251aea037f514eae0e" dependencies = [ "nom 7.1.1", ] diff --git a/gfx/wr/peek-poke/src/vec_ext.rs b/gfx/wr/peek-poke/src/vec_ext.rs index 42e26032e5..91438ee26a 100644 --- a/gfx/wr/peek-poke/src/vec_ext.rs +++ b/gfx/wr/peek-poke/src/vec_ext.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::vec::Vec; - pub trait VecExt { type Item; unsafe fn set_end_ptr(&mut self, end: *const Self::Item); diff --git a/gfx/wr/servo-tidy.toml b/gfx/wr/servo-tidy.toml index 442c407fca..d548c5089f 100644 --- a/gfx/wr/servo-tidy.toml +++ b/gfx/wr/servo-tidy.toml @@ -32,6 +32,8 @@ packages = [ # transition to syn 2 is underway. 
"syn", "synstructure", + # Requires an update to clap v4 + "textwrap", # Can be fixed by removing time dependency - see bug 1765324 "wasi", ] diff --git a/gfx/wr/webrender/Cargo.toml b/gfx/wr/webrender/Cargo.toml index 506226beca..b99404de0d 100644 --- a/gfx/wr/webrender/Cargo.toml +++ b/gfx/wr/webrender/Cargo.toml @@ -52,7 +52,7 @@ svg_fmt = "0.4" tracy-rs = "0.1.2" derive_more = { version = "0.99", default-features = false, features = ["add_assign"] } etagere = "0.2.6" -glean = { version = "58.1.0", optional = true } +glean = { version = "59.0.0", optional = true } firefox-on-glean = { version = "0.1.0", optional = true } swgl = { path = "../swgl", optional = true } topological-sort = "0.1" diff --git a/gfx/wr/webrender/res/ps_quad.glsl b/gfx/wr/webrender/res/ps_quad.glsl index 94c80a93f7..3565c28afc 100644 --- a/gfx/wr/webrender/res/ps_quad.glsl +++ b/gfx/wr/webrender/res/ps_quad.glsl @@ -25,6 +25,11 @@ /// +-----------------------------+ | device pixel scale | +-----------+--------------+-+-+ /// | content origin | /// +--------------------+ +/// +/// To use the quad infrastructure, a shader must define the following entry +/// points in the corresponding shader stages: +/// - void pattern_vertex(PrimitiveInfo prim) +/// - vec4 pattern_fragment(vec4 base_color) ///``` #define WR_FEATURE_TEXTURE_2D @@ -39,6 +44,10 @@ flat varying mediump vec4 v_uv_sample_bounds; flat varying lowp ivec4 v_flags; varying highp vec2 v_uv; +#ifndef SWGL_ANTIALIAS +varying highp vec2 vLocalPos; +#endif + #ifdef WR_VERTEX_SHADER #define EDGE_AA_LEFT 1 @@ -73,6 +82,7 @@ struct PrimitiveInfo { int edge_flags; int quad_flags; + ivec2 pattern_input; }; struct QuadSegment { @@ -112,6 +122,7 @@ QuadPrimitive fetch_primitive(int index) { struct QuadHeader { int transform_id; int z_id; + ivec2 pattern_input; }; QuadHeader fetch_header(int address) { @@ -119,7 +130,8 @@ QuadHeader fetch_header(int address) { QuadHeader qh = QuadHeader( header.x, - header.y + header.y, + header.zw ); return qh; @@ -204,7 +216,11 @@ float edge_aa_offset(int edge, int flags) { return ((flags & edge) != 0) ? AA_PIXEL_RADIUS : 0.0; } -PrimitiveInfo ps_quad_main(void) { +#ifdef WR_VERTEX_SHADER +void pattern_vertex(PrimitiveInfo prim); +#endif + +PrimitiveInfo quad_primive_info(void) { QuadInstance qi = decode_instance(); QuadHeader qh = fetch_header(qi.prim_address_i); @@ -339,7 +355,54 @@ PrimitiveInfo ps_quad_main(void) { prim.bounds, prim.clip, qi.edge_flags, - qi.quad_flags + qi.quad_flags, + qh.pattern_input ); } + +void antialiasing_vertex(PrimitiveInfo prim) { +#ifndef SWGL_ANTIALIAS + // This does the setup that is required for init_tranform_vs. 
+ RectWithEndpoint xf_bounds = RectWithEndpoint( + max(prim.local_prim_rect.p0, prim.local_clip_rect.p0), + min(prim.local_prim_rect.p1, prim.local_clip_rect.p1) + ); + vTransformBounds = vec4(xf_bounds.p0, xf_bounds.p1); + + vLocalPos = prim.local_pos; + + if (prim.edge_flags == 0) { + v_flags.x = 0; + } else { + v_flags.x = 1; + } +#endif +} + +void main() { + PrimitiveInfo prim = quad_primive_info(); + antialiasing_vertex(prim); + pattern_vertex(prim); +} +#endif + +#ifdef WR_FRAGMENT_SHADER +vec4 pattern_fragment(vec4 base_color); + +float antialiasing_fragment() { + float alpha = 1.0; +#ifndef SWGL_ANTIALIAS + if (v_flags.x != 0) { + alpha = init_transform_fs(vLocalPos); + } +#endif + return alpha; +} + +void main() { + vec4 base_color = v_color; + base_color *= antialiasing_fragment(); + oFragColor = pattern_fragment(base_color); +} + #endif diff --git a/gfx/wr/webrender/res/ps_quad_mask.glsl b/gfx/wr/webrender/res/ps_quad_mask.glsl index 6b72714efb..0d700d32ec 100644 --- a/gfx/wr/webrender/res/ps_quad_mask.glsl +++ b/gfx/wr/webrender/res/ps_quad_mask.glsl @@ -15,10 +15,10 @@ flat varying highp vec4 vClipCenter_Radius_TL; flat varying highp vec4 vClipCenter_Radius_TR; flat varying highp vec4 vClipCenter_Radius_BR; flat varying highp vec4 vClipCenter_Radius_BL; -flat varying highp vec3 vClipPlane_TL; -flat varying highp vec3 vClipPlane_TR; -flat varying highp vec3 vClipPlane_BL; -flat varying highp vec3 vClipPlane_BR; +// We pack 4 vec3 clip planes into 3 vec4 to save a varying slot. +flat varying highp vec4 vClipPlane_A; +flat varying highp vec4 vClipPlane_B; +flat varying highp vec4 vClipPlane_C; #endif flat varying highp vec2 vClipMode; @@ -62,8 +62,7 @@ Clip fetch_clip(int index) { return clip; } -void main(void) { - PrimitiveInfo prim_info = ps_quad_main(); +void pattern_vertex(PrimitiveInfo prim_info) { Clip clip = fetch_clip(aClipData.y); Transform clip_transform = fetch_transform(aClipData.x); @@ -121,14 +120,18 @@ void main(void) { vec2 n_tr = vec2(r_tr.y, -r_tr.x); vec2 n_br = r_br.yx; vec2 n_bl = vec2(-r_bl.y, r_bl.x); - vClipPlane_TL = vec3(n_tl, - dot(n_tl, vec2(clip.rect.p0.x, clip.rect.p0.y + r_tl.y))); - vClipPlane_TR = vec3(n_tr, - dot(n_tr, vec2(clip.rect.p1.x - r_tr.x, clip.rect.p0.y))); - vClipPlane_BR = vec3(n_br, - dot(n_br, vec2(clip.rect.p1.x, clip.rect.p1.y - r_br.y))); - vClipPlane_BL = vec3(n_bl, - dot(n_bl, vec2(clip.rect.p0.x + r_bl.x, clip.rect.p1.y))); + vec3 tl = vec3(n_tl, + dot(n_tl, vec2(clip.rect.p0.x, clip.rect.p0.y + r_tl.y))); + vec3 tr = vec3(n_tr, + dot(n_tr, vec2(clip.rect.p1.x - r_tr.x, clip.rect.p0.y))); + vec3 br = vec3(n_br, + dot(n_br, vec2(clip.rect.p1.x, clip.rect.p1.y - r_br.y))); + vec3 bl = vec3(n_bl, + dot(n_bl, vec2(clip.rect.p0.x + r_bl.x, clip.rect.p1.y))); + + vClipPlane_A = vec4(tl.x, tl.y, tl.z, tr.x); + vClipPlane_B = vec4(tr.y, tr.z, br.x, br.y); + vClipPlane_C = vec4(br.z, bl.x, bl.y, bl.z); #endif } @@ -148,22 +151,27 @@ float sd_rounded_box(in vec2 pos, in vec2 box_size, in float radius) { } #endif -void main(void) { +vec4 pattern_fragment(vec4 _base_color) { vec2 clip_local_pos = vClipLocalPos.xy / vClipLocalPos.w; float aa_range = compute_aa_range(clip_local_pos); #ifdef WR_FEATURE_FAST_PATH float dist = sd_rounded_box(clip_local_pos, v_clip_params.xy, v_clip_params.z); #else + vec3 plane_tl = vec3(vClipPlane_A.x, vClipPlane_A.y, vClipPlane_A.z); + vec3 plane_tr = vec3(vClipPlane_A.w, vClipPlane_B.x, vClipPlane_B.y); + vec3 plane_br = vec3(vClipPlane_B.z, vClipPlane_B.w, vClipPlane_C.x); + vec3 plane_bl = 
vec3(vClipPlane_C.y, vClipPlane_C.z, vClipPlane_C.w); + float dist = distance_to_rounded_rect( clip_local_pos, - vClipPlane_TL, + plane_tl, vClipCenter_Radius_TL, - vClipPlane_TR, + plane_tr, vClipCenter_Radius_TR, - vClipPlane_BR, + plane_br, vClipCenter_Radius_BR, - vClipPlane_BL, + plane_bl, vClipCenter_Radius_BL, vTransformBounds ); @@ -175,6 +183,6 @@ void main(void) { // Select alpha or inverse alpha depending on clip in/out. float final_alpha = mix(alpha, 1.0 - alpha, vClipMode.x); - oFragColor = vec4(final_alpha); + return vec4(final_alpha); } #endif diff --git a/gfx/wr/webrender/res/ps_quad_textured.glsl b/gfx/wr/webrender/res/ps_quad_textured.glsl index 2382623cdb..b405ccac2c 100644 --- a/gfx/wr/webrender/res/ps_quad_textured.glsl +++ b/gfx/wr/webrender/res/ps_quad_textured.glsl @@ -6,30 +6,8 @@ #include ps_quad -#ifndef SWGL_ANTIALIAS -varying highp vec2 vLocalPos; -#endif - #ifdef WR_VERTEX_SHADER -void main(void) { - PrimitiveInfo info = ps_quad_main(); - -#ifndef SWGL_ANTIALIAS - RectWithEndpoint xf_bounds = RectWithEndpoint( - max(info.local_prim_rect.p0, info.local_clip_rect.p0), - min(info.local_prim_rect.p1, info.local_clip_rect.p1) - ); - vTransformBounds = vec4(xf_bounds.p0, xf_bounds.p1); - - vLocalPos = info.local_pos; - - if (info.edge_flags == 0) { - v_flags.x = 0; - } else { - v_flags.x = 1; - } -#endif - +void pattern_vertex(PrimitiveInfo info) { if ((info.quad_flags & QF_SAMPLE_AS_MASK) != 0) { v_flags.z = 1; } else { @@ -39,16 +17,7 @@ void main(void) { #endif #ifdef WR_FRAGMENT_SHADER -void main(void) { - vec4 color = v_color; - -#ifndef SWGL_ANTIALIAS - if (v_flags.x != 0) { - float alpha = init_transform_fs(vLocalPos); - color *= alpha; - } -#endif - +vec4 pattern_fragment(vec4 color) { if (v_flags.y != 0) { vec2 uv = clamp(v_uv, v_uv_sample_bounds.xy, v_uv_sample_bounds.zw); vec4 texel = TEX_SAMPLE(sColor0, uv); @@ -58,7 +27,7 @@ void main(void) { color *= texel; } - oFragColor = color; + return color; } #if defined(SWGL_DRAW_SPAN) diff --git a/gfx/wr/webrender/src/batch.rs b/gfx/wr/webrender/src/batch.rs index 605283c58d..7cf9341515 100644 --- a/gfx/wr/webrender/src/batch.rs +++ b/gfx/wr/webrender/src/batch.rs @@ -6,13 +6,14 @@ use api::{AlphaType, ClipMode, ImageBufferKind}; use api::{FontInstanceFlags, YuvColorSpace, YuvFormat, ColorDepth, ColorRange, PremultipliedColorF}; use api::units::*; use crate::clip::{ClipNodeFlags, ClipNodeRange, ClipItemKind, ClipStore}; -use crate::command_buffer::{PrimitiveCommand, QuadFlags}; +use crate::command_buffer::PrimitiveCommand; use crate::composite::CompositorSurfaceKind; +use crate::pattern::PatternKind; use crate::spatial_tree::{SpatialTree, SpatialNodeIndex, CoordinateSystemId}; use glyph_rasterizer::{GlyphFormat, SubpixelDirection}; use crate::gpu_cache::{GpuBlockData, GpuCache, GpuCacheAddress}; use crate::gpu_types::{BrushFlags, BrushInstance, PrimitiveHeaders, ZBufferId, ZBufferIdGenerator}; -use crate::gpu_types::{SplitCompositeInstance, QuadInstance}; +use crate::gpu_types::SplitCompositeInstance; use crate::gpu_types::{PrimitiveInstanceData, RasterizationSpace, GlyphInstance}; use crate::gpu_types::{PrimitiveHeader, PrimitiveHeaderIndex, TransformPaletteId, TransformPalette}; use crate::gpu_types::{ImageBrushData, get_shader_opacity, BoxShadowData, MaskInstance}; @@ -22,12 +23,13 @@ use crate::picture::{Picture3DContext, PictureCompositeMode, calculate_screen_uv use crate::prim_store::{PrimitiveInstanceKind, ClipData}; use crate::prim_store::{PrimitiveInstance, PrimitiveOpacity, SegmentInstanceIndex}; use 
crate::prim_store::{BrushSegment, ClipMaskKind, ClipTaskIndex}; -use crate::prim_store::{VECS_PER_SEGMENT, PrimitiveInstanceIndex}; +use crate::prim_store::VECS_PER_SEGMENT; +use crate::quad; use crate::render_target::RenderTargetContext; use crate::render_task_graph::{RenderTaskId, RenderTaskGraph}; use crate::render_task::{RenderTaskAddress, RenderTaskKind, SubPass}; use crate::renderer::{BlendMode, GpuBufferBuilder, ShaderColorMode}; -use crate::renderer::{MAX_VERTEX_TEXTURE_WIDTH, GpuBufferAddress}; +use crate::renderer::MAX_VERTEX_TEXTURE_WIDTH; use crate::resource_cache::{GlyphFetchResult, ImageProperties}; use crate::space::SpaceMapper; use crate::visibility::{PrimitiveVisibilityFlags, VisibilityState}; @@ -72,7 +74,7 @@ pub enum BatchKind { SplitComposite, TextRun(GlyphFormat), Brush(BrushBatchKind), - Primitive, + Quad(PatternKind), } /// Input textures for a primitive, without consideration of clip mask @@ -794,51 +796,6 @@ impl BatchBuilder { self.batcher.clear(); } - /// Add a quad primitive to the batch list, appllying edge AA and tiling - /// segments as required. - fn add_quad_to_batch( - &mut self, - prim_instance_index: PrimitiveInstanceIndex, - transform_id: TransformPaletteId, - prim_address_f: GpuBufferAddress, - quad_flags: QuadFlags, - edge_flags: EdgeAaSegmentMask, - segment_index: u8, - task_id: RenderTaskId, - z_generator: &mut ZBufferIdGenerator, - prim_instances: &[PrimitiveInstance], - render_tasks: &RenderTaskGraph, - gpu_buffer_builder: &mut GpuBufferBuilder, - ) { - let prim_instance = &prim_instances[prim_instance_index.0 as usize]; - let prim_info = &prim_instance.vis; - let bounding_rect = &prim_info.clip_chain.pic_coverage_rect; - let z_id = z_generator.next(); - - add_quad_to_batch( - self.batcher.render_task_address, - transform_id, - prim_address_f, - quad_flags, - edge_flags, - segment_index, - task_id, - z_id, - render_tasks, - gpu_buffer_builder, - |key, instance| { - let batch = self.batcher.set_params_and_get_batch( - key, - BatchFeatures::empty(), - bounding_rect, - z_id, - ); - - batch.push(instance); - } - ); - } - // Adds a primitive to a batch. // It can recursively call itself in some situations, for // example if it encounters a picture where the items @@ -869,38 +826,71 @@ impl BatchBuilder { PrimitiveCommand::Instance { prim_instance_index, gpu_buffer_address } => { (prim_instance_index, Some(gpu_buffer_address.as_int())) } - PrimitiveCommand::Quad { prim_instance_index, gpu_buffer_address, quad_flags, edge_flags, transform_id } => { + PrimitiveCommand::Quad { pattern, pattern_input, prim_instance_index, gpu_buffer_address, quad_flags, edge_flags, transform_id } => { + let prim_instance = &prim_instances[prim_instance_index.0 as usize]; + let prim_info = &prim_instance.vis; + let bounding_rect = &prim_info.clip_chain.pic_coverage_rect; + let render_task_address = self.batcher.render_task_address; + if segments.is_empty() { - self.add_quad_to_batch( - *prim_instance_index, + let z_id = z_generator.next(); + // TODO: Some pattern types will sample from render tasks. + // At the moment quads only use a render task as source for + // segments which have been pre-rendered and masked. 
+ let src_color_task_id = RenderTaskId::INVALID; + + quad::add_to_batch( + *pattern, + *pattern_input, + render_task_address, *transform_id, *gpu_buffer_address, *quad_flags, *edge_flags, INVALID_SEGMENT_INDEX as u8, - RenderTaskId::INVALID, - z_generator, - prim_instances, + src_color_task_id, + z_id, render_tasks, gpu_buffer_builder, + |key, instance| { + let batch = self.batcher.set_params_and_get_batch( + key, + BatchFeatures::empty(), + bounding_rect, + z_id, + ); + batch.push(instance); + }, ); } else { for (i, task_id) in segments.iter().enumerate() { // TODO(gw): edge_flags should be per-segment, when used for more than composites debug_assert!(edge_flags.is_empty()); - self.add_quad_to_batch( - *prim_instance_index, + let z_id = z_generator.next(); + + quad::add_to_batch( + *pattern, + *pattern_input, + render_task_address, *transform_id, *gpu_buffer_address, *quad_flags, *edge_flags, i as u8, *task_id, - z_generator, - prim_instances, + z_id, render_tasks, gpu_buffer_builder, + |key, instance| { + let batch = self.batcher.set_params_and_get_batch( + key, + BatchFeatures::empty(), + bounding_rect, + z_id, + ); + batch.push(instance); + }, ); } } @@ -3892,154 +3882,6 @@ impl<'a, 'rc> RenderTargetContext<'a, 'rc> { } } -pub fn add_quad_to_batch<F>( - render_task_address: RenderTaskAddress, - transform_id: TransformPaletteId, - prim_address_f: GpuBufferAddress, - quad_flags: QuadFlags, - edge_flags: EdgeAaSegmentMask, - segment_index: u8, - task_id: RenderTaskId, - z_id: ZBufferId, - render_tasks: &RenderTaskGraph, - gpu_buffer_builder: &mut GpuBufferBuilder, - mut f: F, -) where F: FnMut(BatchKey, PrimitiveInstanceData) { - - #[repr(u8)] - enum PartIndex { - Center = 0, - Left = 1, - Top = 2, - Right = 3, - Bottom = 4, - All = 5, - } - - let mut writer = gpu_buffer_builder.i32.write_blocks(1); - writer.push_one([ - transform_id.0 as i32, - z_id.0, - 0, - 0, - ]); - let prim_address_i = writer.finish(); - - let texture = match task_id { - RenderTaskId::INVALID => { - TextureSource::Invalid - } - _ => { - let texture = render_tasks - .resolve_texture(task_id) - .expect("bug: valid task id must be resolvable"); - - texture - } - }; - - let textures = BatchTextures::prim_textured( - texture, - TextureSource::Invalid, - ); - - let default_blend_mode = if quad_flags.contains(QuadFlags::IS_OPAQUE) && task_id == RenderTaskId::INVALID { - BlendMode::None - } else { - BlendMode::PremultipliedAlpha - }; - - let edge_flags_bits = edge_flags.bits(); - - let prim_batch_key = BatchKey { - blend_mode: default_blend_mode, - kind: BatchKind::Primitive, - textures, - }; - - let edge_batch_key = BatchKey { - blend_mode: BlendMode::PremultipliedAlpha, - kind: BatchKind::Primitive, - textures, - }; - - if edge_flags.is_empty() { - let instance = QuadInstance { - render_task_address, - prim_address_i, - prim_address_f, - z_id, - transform_id, - edge_flags: edge_flags_bits, - quad_flags: quad_flags.bits(), - part_index: PartIndex::All as u8, - segment_index, - }; - - f(prim_batch_key, instance.into()); - } else if quad_flags.contains(QuadFlags::USE_AA_SEGMENTS) { - let main_instance = QuadInstance { - render_task_address, - prim_address_i, - prim_address_f, - z_id, - transform_id, - edge_flags: edge_flags_bits, - quad_flags: quad_flags.bits(), - part_index: PartIndex::Center as u8, - segment_index, - }; - - if edge_flags.contains(EdgeAaSegmentMask::LEFT) { - let instance = QuadInstance { - part_index: PartIndex::Left as u8, - ..main_instance - }; - f(edge_batch_key, instance.into()); - } - if 
edge_flags.contains(EdgeAaSegmentMask::RIGHT) { - let instance = QuadInstance { - part_index: PartIndex::Top as u8, - ..main_instance - }; - f(edge_batch_key, instance.into()); - } - if edge_flags.contains(EdgeAaSegmentMask::TOP) { - let instance = QuadInstance { - part_index: PartIndex::Right as u8, - ..main_instance - }; - f(edge_batch_key, instance.into()); - } - if edge_flags.contains(EdgeAaSegmentMask::BOTTOM) { - let instance = QuadInstance { - part_index: PartIndex::Bottom as u8, - ..main_instance - }; - f(edge_batch_key, instance.into()); - } - - let instance = QuadInstance { - ..main_instance - }; - f(prim_batch_key, instance.into()); - } else { - let instance = QuadInstance { - render_task_address, - prim_address_i, - prim_address_f, - z_id, - transform_id, - edge_flags: edge_flags_bits, - quad_flags: quad_flags.bits(), - part_index: PartIndex::All as u8, - segment_index, - }; - - f(edge_batch_key, instance.into()); - } -} - impl CompositorSurfaceKind { /// Returns true if the type of compositor surface needs an alpha cutout rendered fn needs_cutout(&self) -> bool { diff --git a/gfx/wr/webrender/src/capture.rs b/gfx/wr/webrender/src/capture.rs index 5cc1f90bab..7184f44b50 100644 --- a/gfx/wr/webrender/src/capture.rs +++ b/gfx/wr/webrender/src/capture.rs @@ -14,8 +14,6 @@ use api::units::DeviceIntSize; #[cfg(feature = "capture")] use crate::print_tree::{PrintableTree, PrintTree}; use crate::render_api::CaptureBits; -use ron; -use serde; #[derive(Clone)] diff --git a/gfx/wr/webrender/src/command_buffer.rs b/gfx/wr/webrender/src/command_buffer.rs index c0630db054..e630ceb628 100644 --- a/gfx/wr/webrender/src/command_buffer.rs +++ b/gfx/wr/webrender/src/command_buffer.rs @@ -3,6 +3,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ use api::units::PictureRect; +use crate::pattern::{PatternKind, PatternShaderInput}; use crate::{spatial_tree::SpatialNodeIndex, render_task_graph::RenderTaskId, surface::SurfaceTileDescriptor, picture::TileKey, renderer::GpuBufferAddress, FastHashMap, prim_store::PrimitiveInstanceIndex, gpu_cache::GpuCacheAddress}; use crate::gpu_types::{QuadSegment, TransformPaletteId}; use crate::segment::EdgeAaSegmentMask; @@ -115,6 +116,8 @@ pub enum PrimitiveCommand { gpu_buffer_address: GpuBufferAddress, }, Quad { + pattern: PatternKind, + pattern_input: PatternShaderInput, // TODO(gw): Used for bounding rect only, could possibly remove prim_instance_index: PrimitiveInstanceIndex, gpu_buffer_address: GpuBufferAddress, @@ -144,6 +147,8 @@ impl PrimitiveCommand { } pub fn quad( + pattern: PatternKind, + pattern_input: PatternShaderInput, prim_instance_index: PrimitiveInstanceIndex, gpu_buffer_address: GpuBufferAddress, transform_id: TransformPaletteId, @@ -151,6 +156,8 @@ impl PrimitiveCommand { edge_flags: EdgeAaSegmentMask, ) -> Self { PrimitiveCommand::Quad { + pattern, + pattern_input, prim_instance_index, gpu_buffer_address, transform_id, @@ -232,8 +239,11 @@ impl CommandBuffer { self.commands.push(Command::draw_instance(prim_instance_index)); self.commands.push(Command::data((gpu_buffer_address.u as u32) << 16 | gpu_buffer_address.v as u32)); } - PrimitiveCommand::Quad { prim_instance_index, gpu_buffer_address, transform_id, quad_flags, edge_flags } => { + PrimitiveCommand::Quad { pattern, pattern_input, prim_instance_index, gpu_buffer_address, transform_id, quad_flags, edge_flags } => { self.commands.push(Command::draw_quad(prim_instance_index)); + self.commands.push(Command::data(pattern as u32)); + self.commands.push(Command::data(pattern_input.0 as u32)); + self.commands.push(Command::data(pattern_input.1 as u32)); self.commands.push(Command::data((gpu_buffer_address.u as u32) << 16 | gpu_buffer_address.v as u32)); self.commands.push(Command::data(transform_id.0)); self.commands.push(Command::data((quad_flags.bits() as u32) << 16 | edge_flags.bits() as u32)); @@ -279,6 +289,11 @@ impl CommandBuffer { } Command::CMD_DRAW_QUAD => { let prim_instance_index = PrimitiveInstanceIndex(param); + let pattern = PatternKind::from_u32(cmd_iter.next().unwrap().0); + let pattern_input = PatternShaderInput( + cmd_iter.next().unwrap().0 as i32, + cmd_iter.next().unwrap().0 as i32, + ); let data = cmd_iter.next().unwrap(); let transform_id = TransformPaletteId(cmd_iter.next().unwrap().0); let bits = cmd_iter.next().unwrap().0; @@ -289,6 +304,8 @@ impl CommandBuffer { v: (data.0 & 0xffff) as u16, }; let cmd = PrimitiveCommand::quad( + pattern, + pattern_input, prim_instance_index, gpu_buffer_address, transform_id, diff --git a/gfx/wr/webrender/src/device/gl.rs b/gfx/wr/webrender/src/device/gl.rs index 04a7e13023..c17a16a757 100644 --- a/gfx/wr/webrender/src/device/gl.rs +++ b/gfx/wr/webrender/src/device/gl.rs @@ -199,10 +199,6 @@ pub fn get_unoptimized_shader_source(shader_name: &str, base_path: Option<&PathB } } -pub trait FileWatcherHandler: Send { - fn file_changed(&self, path: PathBuf); -} - impl VertexAttributeKind { fn size_in_bytes(&self) -> u32 { match *self { @@ -938,7 +934,7 @@ impl VertexUsageHint { } #[derive(Copy, Clone, Debug)] -pub struct UniformLocation(gl::GLint); +pub struct UniformLocation(#[allow(dead_code)] gl::GLint); impl UniformLocation { pub const INVALID: Self = UniformLocation(-1); @@ -1412,7 +1408,8 @@ fn parse_mali_version(version_string: &str) -> Option<(u32, u32, u32)> { let 
(r_str, version_string) = version_string.split_once("p")?; let r = r_str.parse().ok()?; - let (p_str, _) = version_string.split_once("-")?; + // Not all devices have the trailing string following the "p" number. + let (p_str, _) = version_string.split_once("-").unwrap_or((version_string, "")); let p = p_str.parse().ok()?; Some((v, r, p)) diff --git a/gfx/wr/webrender/src/device/query_gl.rs b/gfx/wr/webrender/src/device/query_gl.rs index c7fd9a9070..db3dfcd6cf 100644 --- a/gfx/wr/webrender/src/device/query_gl.rs +++ b/gfx/wr/webrender/src/device/query_gl.rs @@ -313,6 +313,6 @@ impl Drop for GpuMarker { } #[must_use] -pub struct GpuTimeQuery(GpuMarker); +pub struct GpuTimeQuery(#[allow(dead_code)] GpuMarker); #[must_use] pub struct GpuSampleQuery; diff --git a/gfx/wr/webrender/src/internal_types.rs b/gfx/wr/webrender/src/internal_types.rs index c76b7d362d..97827a98fe 100644 --- a/gfx/wr/webrender/src/internal_types.rs +++ b/gfx/wr/webrender/src/internal_types.rs @@ -6,7 +6,6 @@ use api::{ColorF, DocumentId, ExternalImageId, PrimitiveFlags, Parameter, Render use api::{ImageFormat, NotificationRequest, Shadow, FilterOp, ImageBufferKind}; use api::FramePublishId; use api::units::*; -use api; use crate::render_api::DebugCommand; use crate::composite::NativeSurfaceOperation; use crate::device::TextureFilter; @@ -692,7 +691,6 @@ impl ResourceUpdateList { /// Wraps a frame_builder::Frame, but conceptually could hold more information pub struct RenderedDocument { pub frame: Frame, - pub is_new_scene: bool, pub profile: TransactionProfile, pub render_reasons: RenderReasons, pub frame_stats: Option<FullFrameStats> diff --git a/gfx/wr/webrender/src/lib.rs b/gfx/wr/webrender/src/lib.rs index d319bc68bf..2699da7cfb 100644 --- a/gfx/wr/webrender/src/lib.rs +++ b/gfx/wr/webrender/src/lib.rs @@ -101,11 +101,13 @@ mod gpu_types; mod hit_test; mod internal_types; mod lru_cache; +mod pattern; mod picture; mod picture_graph; mod prepare; mod prim_store; mod print_tree; +mod quad; mod render_backend; mod render_target; mod render_task_graph; diff --git a/gfx/wr/webrender/src/pattern.rs b/gfx/wr/webrender/src/pattern.rs new file mode 100644 index 0000000000..36a06fa2b9 --- /dev/null +++ b/gfx/wr/webrender/src/pattern.rs @@ -0,0 +1,68 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use api::{ColorF, PremultipliedColorF}; + +#[repr(u32)] +#[cfg_attr(feature = "capture", derive(Serialize))] +#[cfg_attr(feature = "replay", derive(Deserialize))] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum PatternKind { + ColorOrTexture = 0, + + Mask = 1, + // When adding patterns, don't forget to update the NUM_PATTERNS constant. +} + +pub const NUM_PATTERNS: u32 = 2; + +impl PatternKind { + pub fn from_u32(val: u32) -> Self { + assert!(val < NUM_PATTERNS); + unsafe { std::mem::transmute(val) } + } +} + +/// A 32bit payload used as input for the pattern-specific logic in the shader. +/// +/// Patterns typically use it as a GpuBuffer offset to fetch their data. 
+#[cfg_attr(feature = "capture", derive(Serialize))] +#[cfg_attr(feature = "replay", derive(Deserialize))] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct PatternShaderInput(pub i32, pub i32); + +impl Default for PatternShaderInput { + fn default() -> Self { + PatternShaderInput(0, 0) + } +} + +#[derive(Copy, Clone, Debug)] +pub struct Pattern { + pub kind: PatternKind, + pub shader_input: PatternShaderInput, + pub base_color: PremultipliedColorF, + pub is_opaque: bool, +} + +impl Pattern { + pub fn color(color: ColorF) -> Self { + Pattern { + kind: PatternKind::ColorOrTexture, + shader_input: PatternShaderInput::default(), + base_color: color.premultiplied(), + is_opaque: color.a >= 1.0, + } + } + + pub fn clear() -> Self { + // Opaque black with operator dest out + Pattern { + kind: PatternKind::ColorOrTexture, + shader_input: PatternShaderInput::default(), + base_color: PremultipliedColorF::BLACK, + is_opaque: false, + } + } +} diff --git a/gfx/wr/webrender/src/prepare.rs b/gfx/wr/webrender/src/prepare.rs index a59eca0670..d9b4521cfc 100644 --- a/gfx/wr/webrender/src/prepare.rs +++ b/gfx/wr/webrender/src/prepare.rs @@ -6,13 +6,13 @@ //! //! TODO: document this! -use api::{ColorF, PremultipliedColorF, PropertyBinding}; +use api::{PremultipliedColorF, PropertyBinding}; use api::{BoxShadowClipMode, BorderStyle, ClipMode}; use api::units::*; use euclid::Scale; use smallvec::SmallVec; use crate::composite::CompositorSurfaceKind; -use crate::command_buffer::{PrimitiveCommand, QuadFlags, CommandBufferIndex}; +use crate::command_buffer::{PrimitiveCommand, CommandBufferIndex}; use crate::image_tiling::{self, Repetition}; use crate::border::{get_max_scale_for_border, build_border_instances}; use crate::clip::{ClipStore, ClipNodeRange}; @@ -20,32 +20,29 @@ use crate::spatial_tree::{SpatialNodeIndex, SpatialTree}; use crate::clip::{ClipDataStore, ClipNodeFlags, ClipChainInstance, ClipItemKind}; use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureContext, PictureState}; use crate::gpu_cache::{GpuCacheHandle, GpuDataRequest}; -use crate::gpu_types::{BrushFlags, TransformPaletteId, QuadSegment}; +use crate::gpu_types::BrushFlags; use crate::internal_types::{FastHashMap, PlaneSplitAnchor, Filter}; use crate::picture::{PicturePrimitive, SliceId, ClusterFlags, PictureCompositeMode}; use crate::picture::{PrimitiveList, PrimitiveCluster, SurfaceIndex, TileCacheInstance, SubpixelMode, Picture3DContext}; use crate::prim_store::line_dec::MAX_LINE_DECORATION_RESOLUTION; use crate::prim_store::*; +use crate::quad; +use crate::pattern::Pattern; use crate::prim_store::gradient::GradientGpuBlockBuilder; use crate::render_backend::DataStores; use crate::render_task_graph::RenderTaskId; use crate::render_task_cache::RenderTaskCacheKeyKind; use crate::render_task_cache::{RenderTaskCacheKey, to_cache_size, RenderTaskParent}; use crate::render_task::{RenderTaskKind, RenderTask, SubPass, MaskSubPass, EmptyTask}; -use crate::renderer::{GpuBufferBuilderF, GpuBufferAddress}; -use crate::segment::{EdgeAaSegmentMask, SegmentBuilder}; -use crate::space::SpaceMapper; -use crate::util::{clamp_to_scale_factor, pack_as_float, MaxRect}; +use crate::segment::SegmentBuilder; +use crate::util::{clamp_to_scale_factor, pack_as_float}; use crate::visibility::{compute_conservative_visible_rect, PrimitiveVisibility, VisibilityState}; const MAX_MASK_SIZE: i32 = 4096; -const MIN_BRUSH_SPLIT_SIZE: f32 = 256.0; const MIN_BRUSH_SPLIT_AREA: f32 = 128.0 * 128.0; -const MIN_AA_SEGMENTS_SIZE: f32 = 4.0; - 
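The new pattern.rs module above pairs each PatternKind with a PatternShaderInput, and the command_buffer.rs change earlier in this diff serializes both as plain u32 command words alongside each quad. A minimal, self-contained Rust sketch of that round trip follows; the types are re-declared here in simplified form purely for illustration, and the real definitions are the ones shown in pattern.rs above.

    // Simplified stand-ins for the types added in pattern.rs, for illustration only.
    #[repr(u32)]
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    enum PatternKind {
        ColorOrTexture = 0,
        Mask = 1,
        // When adding patterns, don't forget to update NUM_PATTERNS.
    }

    const NUM_PATTERNS: u32 = 2;

    impl PatternKind {
        fn from_u32(val: u32) -> Self {
            // Valid because the discriminants are exactly 0..NUM_PATTERNS.
            assert!(val < NUM_PATTERNS);
            unsafe { std::mem::transmute(val) }
        }
    }

    /// Two opaque i32s of pattern-specific data (typically a GpuBuffer offset).
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct PatternShaderInput(i32, i32);

    fn main() {
        let pattern = PatternKind::Mask;
        let input = PatternShaderInput(42, -7);

        // Encode: three extra u32 data words per quad command, mirroring the
        // CMD_DRAW_QUAD writer in the command_buffer.rs hunk above.
        let words = [pattern as u32, input.0 as u32, input.1 as u32];

        // Decode: read the words back when the command stream is replayed.
        let decoded_kind = PatternKind::from_u32(words[0]);
        let decoded_input = PatternShaderInput(words[1] as i32, words[2] as i32);

        assert_eq!(decoded_kind, pattern);
        assert_eq!(decoded_input, input);
    }

The assert in from_u32 is what keeps the unsafe transmute sound if a corrupted or out-of-range command word is ever replayed.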
pub fn prepare_primitives( store: &mut PrimitiveStore, prim_list: &mut PrimitiveList, @@ -141,110 +138,6 @@ fn can_use_clip_chain_for_quad_path( true } -/// Describes how clipping affects the rendering of a quad primitive. -/// -/// As a general rule, parts of the quad that require masking are prerendered in an -/// intermediate target and the mask is applied using multiplicative blending to -/// the intermediate result before compositing it into the destination target. -/// -/// Each segment can opt in or out of masking independently. -#[derive(Debug, Copy, Clone)] -pub enum QuadRenderStrategy { - /// The quad is not affected by any mask and is drawn directly in the destination - /// target. - Direct, - /// The quad is drawn entirely in an intermediate target and a mask is applied - /// before compositing in the destination target. - Indirect, - /// A rounded rectangle clip is applied to the quad primitive via a nine-patch. - /// The segments of the nine-patch that require a mask are rendered and masked in - /// an intermediate target, while other segments are drawn directly in the destination - /// target. - NinePatch { - radius: LayoutVector2D, - clip_rect: LayoutRect, - }, - /// Split the primitive into coarse tiles so that each tile independently - /// has the opportunity to be drawn directly in the destination target or - /// via an intermediate target if it is affected by a mask. - Tiled { - x_tiles: u16, - y_tiles: u16, - } -} - -fn get_prim_render_strategy( - prim_spatial_node_index: SpatialNodeIndex, - clip_chain: &ClipChainInstance, - clip_store: &ClipStore, - data_stores: &DataStores, - can_use_nine_patch: bool, - spatial_tree: &SpatialTree, -) -> QuadRenderStrategy { - if !clip_chain.needs_mask { - return QuadRenderStrategy::Direct - } - - fn tile_count_for_size(size: f32) -> u16 { - (size / MIN_BRUSH_SPLIT_SIZE).min(4.0).max(1.0).ceil() as u16 - } - - let prim_coverage_size = clip_chain.pic_coverage_rect.size(); - let x_tiles = tile_count_for_size(prim_coverage_size.width); - let y_tiles = tile_count_for_size(prim_coverage_size.height); - let try_split_prim = x_tiles > 1 || y_tiles > 1; - - if !try_split_prim { - return QuadRenderStrategy::Indirect; - } - - if can_use_nine_patch && clip_chain.clips_range.count == 1 { - let clip_instance = clip_store.get_instance_from_range(&clip_chain.clips_range, 0); - let clip_node = &data_stores.clip[clip_instance.handle]; - - if let ClipItemKind::RoundedRectangle { ref radius, mode: ClipMode::Clip, rect, .. 
} = clip_node.item.kind { - let max_corner_width = radius.top_left.width - .max(radius.bottom_left.width) - .max(radius.top_right.width) - .max(radius.bottom_right.width); - let max_corner_height = radius.top_left.height - .max(radius.bottom_left.height) - .max(radius.top_right.height) - .max(radius.bottom_right.height); - - if max_corner_width <= 0.5 * rect.size().width && - max_corner_height <= 0.5 * rect.size().height { - - let clip_prim_coords_match = spatial_tree.is_matching_coord_system( - prim_spatial_node_index, - clip_node.item.spatial_node_index, - ); - - if clip_prim_coords_match { - let map_clip_to_prim = SpaceMapper::new_with_target( - prim_spatial_node_index, - clip_node.item.spatial_node_index, - LayoutRect::max_rect(), - spatial_tree, - ); - - if let Some(rect) = map_clip_to_prim.map(&rect) { - return QuadRenderStrategy::NinePatch { - radius: LayoutVector2D::new(max_corner_width, max_corner_height), - clip_rect: rect, - }; - } - } - } - } - } - - QuadRenderStrategy::Tiled { - x_tiles, - y_tiles, - } -} - fn prepare_prim_for_render( store: &mut PrimitiveStore, prim_instance_index: usize, @@ -712,334 +605,32 @@ fn prepare_interned_prim_for_render( } ); } else { - let map_prim_to_surface = frame_context.spatial_tree.get_relative_transform( - prim_spatial_node_index, - pic_context.raster_spatial_node_index, - ); - let prim_is_2d_scale_translation = map_prim_to_surface.is_2d_scale_translation(); - let prim_is_2d_axis_aligned = map_prim_to_surface.is_2d_axis_aligned(); - - let strategy = get_prim_render_strategy( - prim_spatial_node_index, - &prim_instance.vis.clip_chain, - frame_state.clip_store, - data_stores, - prim_is_2d_scale_translation, - frame_context.spatial_tree, - ); - let prim_data = &data_stores.prim[*data_handle]; - let (color, is_opaque) = match prim_data.kind { - PrimitiveTemplateKind::Clear => { - // Opaque black with operator dest out - (ColorF::BLACK, false) - } + let pattern = match prim_data.kind { + PrimitiveTemplateKind::Clear => Pattern::clear(), PrimitiveTemplateKind::Rectangle { ref color, .. } => { let color = frame_context.scene_properties.resolve_color(color); - - (color, color.a >= 1.0) + Pattern::color(color) } }; - let premul_color = color.premultiplied(); - - let mut quad_flags = QuadFlags::empty(); - - // Only use AA edge instances if the primitive is large enough to require it - let prim_size = prim_data.common.prim_rect.size(); - if prim_size.width > MIN_AA_SEGMENTS_SIZE && prim_size.height > MIN_AA_SEGMENTS_SIZE { - quad_flags |= QuadFlags::USE_AA_SEGMENTS; - } - - if is_opaque { - quad_flags |= QuadFlags::IS_OPAQUE; - } - let needs_scissor = !prim_is_2d_scale_translation; - if !needs_scissor { - quad_flags |= QuadFlags::APPLY_DEVICE_CLIP; - } - - // TODO(gw): For now, we don't select per-edge AA at all if the primitive - // has a 2d transform, which matches existing behavior. However, - // as a follow up, we can now easily check if we have a 2d-aligned - // primitive on a subpixel boundary, and enable AA along those edge(s). 
- let aa_flags = if prim_is_2d_axis_aligned { - EdgeAaSegmentMask::empty() - } else { - EdgeAaSegmentMask::all() - }; - - let transform_id = frame_state.transforms.get_id( + quad::push_quad( + &pattern, + &prim_data.common.prim_rect, + prim_instance_index, prim_spatial_node_index, - pic_context.raster_spatial_node_index, - frame_context.spatial_tree, - ); - - // TODO(gw): Perhaps rather than writing untyped data here (we at least do validate - // the written block count) to gpu-buffer, we could add a trait for - // writing typed data? - let main_prim_address = write_prim_blocks( - &mut frame_state.frame_gpu_data.f32, - prim_data.common.prim_rect, - prim_instance.vis.clip_chain.local_clip_rect, - premul_color, - &[], + &prim_instance.vis.clip_chain, + device_pixel_scale, + frame_context, + pic_context, + targets, + &data_stores.clip, + frame_state, + pic_state, + scratch, ); - match strategy { - QuadRenderStrategy::Direct => { - frame_state.push_prim( - &PrimitiveCommand::quad( - prim_instance_index, - main_prim_address, - transform_id, - quad_flags, - aa_flags, - ), - prim_spatial_node_index, - targets, - ); - } - QuadRenderStrategy::Indirect => { - let surface = &frame_state.surfaces[pic_context.surface_index.0]; - let Some(clipped_surface_rect) = surface.get_surface_rect( - &prim_instance.vis.clip_chain.pic_coverage_rect, - frame_context.spatial_tree, - ) else { - return; - }; - - let p0 = clipped_surface_rect.min.to_f32(); - let p1 = clipped_surface_rect.max.to_f32(); - - let segment = add_segment( - p0.x, - p0.y, - p1.x, - p1.y, - true, - prim_instance, - prim_spatial_node_index, - pic_context.raster_spatial_node_index, - main_prim_address, - transform_id, - aa_flags, - quad_flags, - device_pixel_scale, - needs_scissor, - frame_state, - ); - - add_composite_prim( - prim_instance_index, - LayoutRect::new(p0.cast_unit(), p1.cast_unit()), - premul_color, - quad_flags, - frame_state, - targets, - &[segment], - ); - } - QuadRenderStrategy::Tiled { x_tiles, y_tiles } => { - let surface = &frame_state.surfaces[pic_context.surface_index.0]; - - let Some(clipped_surface_rect) = surface.get_surface_rect( - &prim_instance.vis.clip_chain.pic_coverage_rect, - frame_context.spatial_tree, - ) else { - return; - }; - - let unclipped_surface_rect = surface.map_to_device_rect( - &prim_instance.vis.clip_chain.pic_coverage_rect, - frame_context.spatial_tree, - ); - - scratch.quad_segments.clear(); - - let mut x_coords = vec![clipped_surface_rect.min.x]; - let mut y_coords = vec![clipped_surface_rect.min.y]; - - let dx = (clipped_surface_rect.max.x - clipped_surface_rect.min.x) as f32 / x_tiles as f32; - let dy = (clipped_surface_rect.max.y - clipped_surface_rect.min.y) as f32 / y_tiles as f32; - - for x in 1 .. (x_tiles as i32) { - x_coords.push((clipped_surface_rect.min.x as f32 + x as f32 * dx).round() as i32); - } - for y in 1 .. (y_tiles as i32) { - y_coords.push((clipped_surface_rect.min.y as f32 + y as f32 * dy).round() as i32); - } - - x_coords.push(clipped_surface_rect.max.x); - y_coords.push(clipped_surface_rect.max.y); - - for y in 0 .. y_coords.len()-1 { - let y0 = y_coords[y]; - let y1 = y_coords[y+1]; - - if y1 <= y0 { - continue; - } - - for x in 0 .. 
x_coords.len()-1 { - let x0 = x_coords[x]; - let x1 = x_coords[x+1]; - - if x1 <= x0 { - continue; - } - - let create_task = true; - - let segment = add_segment( - x0 as f32, - y0 as f32, - x1 as f32, - y1 as f32, - create_task, - prim_instance, - prim_spatial_node_index, - pic_context.raster_spatial_node_index, - main_prim_address, - transform_id, - aa_flags, - quad_flags, - device_pixel_scale, - needs_scissor, - frame_state, - ); - scratch.quad_segments.push(segment); - } - } - - add_composite_prim( - prim_instance_index, - unclipped_surface_rect.cast_unit(), - premul_color, - quad_flags, - frame_state, - targets, - &scratch.quad_segments, - ); - } - QuadRenderStrategy::NinePatch { clip_rect, radius } => { - let surface = &frame_state.surfaces[pic_context.surface_index.0]; - let Some(clipped_surface_rect) = surface.get_surface_rect( - &prim_instance.vis.clip_chain.pic_coverage_rect, - frame_context.spatial_tree, - ) else { - return; - }; - - let unclipped_surface_rect = surface.map_to_device_rect( - &prim_instance.vis.clip_chain.pic_coverage_rect, - frame_context.spatial_tree, - ); - - let local_corner_0 = LayoutRect::new( - clip_rect.min, - clip_rect.min + radius, - ); - - let local_corner_1 = LayoutRect::new( - clip_rect.max - radius, - clip_rect.max, - ); - - let pic_corner_0 = pic_state.map_local_to_pic.map(&local_corner_0).unwrap(); - let pic_corner_1 = pic_state.map_local_to_pic.map(&local_corner_1).unwrap(); - - let surface_rect_0 = surface.map_to_device_rect( - &pic_corner_0, - frame_context.spatial_tree, - ).round_out().to_i32(); - - let surface_rect_1 = surface.map_to_device_rect( - &pic_corner_1, - frame_context.spatial_tree, - ).round_out().to_i32(); - - let p0 = surface_rect_0.min; - let p1 = surface_rect_0.max; - let p2 = surface_rect_1.min; - let p3 = surface_rect_1.max; - - let mut x_coords = [p0.x, p1.x, p2.x, p3.x]; - let mut y_coords = [p0.y, p1.y, p2.y, p3.y]; - - x_coords.sort_by(|a, b| a.partial_cmp(b).unwrap()); - y_coords.sort_by(|a, b| a.partial_cmp(b).unwrap()); - - scratch.quad_segments.clear(); - - for y in 0 .. y_coords.len()-1 { - let y0 = y_coords[y]; - let y1 = y_coords[y+1]; - - if y1 <= y0 { - continue; - } - - for x in 0 .. 
x_coords.len()-1 { - let x0 = x_coords[x]; - let x1 = x_coords[x+1]; - - if x1 <= x0 { - continue; - } - - let create_task = if x == 1 || y == 1 { - false - } else { - true - }; - - let r = DeviceIntRect::new( - DeviceIntPoint::new(x0, y0), - DeviceIntPoint::new(x1, y1), - ); - - let r = match r.intersection(&clipped_surface_rect) { - Some(r) => r, - None => { - continue; - } - }; - - let segment = add_segment( - r.min.x as f32, - r.min.y as f32, - r.max.x as f32, - r.max.y as f32, - create_task, - prim_instance, - prim_spatial_node_index, - pic_context.raster_spatial_node_index, - main_prim_address, - transform_id, - aa_flags, - quad_flags, - device_pixel_scale, - false, - frame_state, - ); - scratch.quad_segments.push(segment); - } - } - - add_composite_prim( - prim_instance_index, - unclipped_surface_rect.cast_unit(), - premul_color, - quad_flags, - frame_state, - targets, - &scratch.quad_segments, - ); - } - } - return; } } @@ -1304,7 +895,7 @@ fn prepare_interned_prim_for_render( .clipped_local_rect .cast_unit(); - let prim_address_f = write_prim_blocks( + let prim_address_f = quad::write_prim_blocks( &mut frame_state.frame_gpu_data.f32, prim_local_rect, prim_instance.vis.clip_chain.local_clip_rect, @@ -2129,143 +1720,6 @@ fn adjust_mask_scale_for_max_size(device_rect: DeviceIntRect, device_pixel_scale } } -pub fn write_prim_blocks( - builder: &mut GpuBufferBuilderF, - prim_rect: LayoutRect, - clip_rect: LayoutRect, - color: PremultipliedColorF, - segments: &[QuadSegment], -) -> GpuBufferAddress { - let mut writer = builder.write_blocks(3 + segments.len() * 2); - - writer.push_one(prim_rect); - writer.push_one(clip_rect); - writer.push_one(color); - - for segment in segments { - writer.push_one(segment.rect); - match segment.task_id { - RenderTaskId::INVALID => { - writer.push_one([0.0; 4]); - } - task_id => { - writer.push_render_task(task_id); - } - } - } - - writer.finish() -} - -fn add_segment( - x0: f32, - y0: f32, - x1: f32, - y1: f32, - create_task: bool, - prim_instance: &PrimitiveInstance, - prim_spatial_node_index: SpatialNodeIndex, - raster_spatial_node_index: SpatialNodeIndex, - prim_address_f: GpuBufferAddress, - transform_id: TransformPaletteId, - aa_flags: EdgeAaSegmentMask, - quad_flags: QuadFlags, - device_pixel_scale: DevicePixelScale, - needs_scissor_rect: bool, - frame_state: &mut FrameBuildingState, -) -> QuadSegment { - let task_size = DeviceSize::new(x1 - x0, y1 - y0).round().to_i32(); - let content_origin = DevicePoint::new(x0, y0); - - let rect = LayoutRect::new( - LayoutPoint::new(x0, y0), - LayoutPoint::new(x1, y1), - ); - - let task_id = if create_task { - let task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( - task_size, - RenderTaskKind::new_prim( - prim_spatial_node_index, - raster_spatial_node_index, - device_pixel_scale, - content_origin, - prim_address_f, - transform_id, - aa_flags, - quad_flags, - prim_instance.vis.clip_chain.clips_range, - needs_scissor_rect, - ), - )); - - let masks = MaskSubPass { - clip_node_range: prim_instance.vis.clip_chain.clips_range, - prim_spatial_node_index, - prim_address_f, - }; - - let task = frame_state.rg_builder.get_task_mut(task_id); - task.add_sub_pass(SubPass::Masks { - masks, - }); - - frame_state.surface_builder.add_child_render_task( - task_id, - frame_state.rg_builder, - ); - - task_id - } else { - RenderTaskId::INVALID - }; - - QuadSegment { - rect, - task_id, - } -} - -fn add_composite_prim( - prim_instance_index: PrimitiveInstanceIndex, - rect: LayoutRect, - color: PremultipliedColorF, - 
quad_flags: QuadFlags, - frame_state: &mut FrameBuildingState, - targets: &[CommandBufferIndex], - segments: &[QuadSegment], -) { - let composite_prim_address = write_prim_blocks( - &mut frame_state.frame_gpu_data.f32, - rect, - rect, - color, - segments, - ); - - frame_state.set_segments( - segments, - targets, - ); - - let mut composite_quad_flags = QuadFlags::IGNORE_DEVICE_PIXEL_SCALE | QuadFlags::APPLY_DEVICE_CLIP; - if quad_flags.contains(QuadFlags::IS_OPAQUE) { - composite_quad_flags |= QuadFlags::IS_OPAQUE; - } - - frame_state.push_cmd( - &PrimitiveCommand::quad( - prim_instance_index, - composite_prim_address, - TransformPaletteId::IDENTITY, - composite_quad_flags, - // TODO(gw): No AA on composite, unless we use it to apply 2d clips - EdgeAaSegmentMask::empty(), - ), - targets, - ); -} - impl CompositorSurfaceKind { /// Returns true if the compositor surface strategy supports segment rendering fn supports_segments(&self) -> bool { diff --git a/gfx/wr/webrender/src/prim_store/storage.rs b/gfx/wr/webrender/src/prim_store/storage.rs index 4b99d87556..cc8997097c 100644 --- a/gfx/wr/webrender/src/prim_store/storage.rs +++ b/gfx/wr/webrender/src/prim_store/storage.rs @@ -2,7 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -use std::{iter::Extend, ops, marker::PhantomData, u32}; +use std::{ops, marker::PhantomData, u32}; use crate::util::Recycler; #[derive(Debug, Hash)] diff --git a/gfx/wr/webrender/src/profiler.rs b/gfx/wr/webrender/src/profiler.rs index ccf86e8647..6d63e10294 100644 --- a/gfx/wr/webrender/src/profiler.rs +++ b/gfx/wr/webrender/src/profiler.rs @@ -95,6 +95,8 @@ static PROFILER_PRESETS: &'static[(&'static str, &'static str)] = &[ (&"GPU samplers", &"Alpha targets samplers,Transparent pass samplers,Opaque pass samplers,Total samplers"), (&"Render reasons", &"Reason scene, Reason animated property, Reason resource update, Reason async image, Reason clear resources, Reason APZ, Reason resize, Reason widget, Reason cache flush, Reason snapshot, Reason resource hook, Reason config change, Reason content sync, Reason flush, On vsync, Reason testing, Reason other"), + + (&"Slow frame breakdown", &"Total slow frames CPU, Total slow frames GPU, Slow: frame build, Slow: upload, Slow: render, Slow: draw calls, Slow: targets, Slow: blobs"), ]; fn find_preset(name: &str) -> Option<&'static str> { @@ -256,7 +258,16 @@ pub const RENDER_REASON_VSYNC: usize = 119; pub const TEXTURES_CREATED: usize = 120; pub const TEXTURES_DELETED: usize = 121; -pub const NUM_PROFILER_EVENTS: usize = 122; +pub const SLOW_FRAME_CPU_COUNT: usize = 122; +pub const SLOW_FRAME_GPU_COUNT: usize = 123; +pub const SLOW_FRAME_BUILD_COUNT: usize = 124; +pub const SLOW_UPLOAD_COUNT: usize = 125; +pub const SLOW_RENDER_COUNT: usize = 126; +pub const SLOW_DRAW_CALLS_COUNT: usize = 127; +pub const SLOW_TARGETS_COUNT: usize = 128; +pub const SLOW_BLOB_COUNT: usize = 129; + +pub const NUM_PROFILER_EVENTS: usize = 130; pub struct Profiler { counters: Vec<Counter>, @@ -270,6 +281,23 @@ pub struct Profiler { // For FPS computation. Updated in update(). frame_timestamps_within_last_second: Vec<u64>, + /// Total number of slow frames on the CPU. + slow_frame_cpu_count: u64, + /// Total number of slow frames on the GPU. + slow_frame_gpu_count: u64, + /// Slow frames dominated by frame building. + slow_frame_build_count: u64, + /// Slow frames dominated by draw call submission. 
+ slow_render_count: u64, + /// Slow frames dominated by texture uploads. + slow_upload_count: u64, + /// Slow renders with a high number of draw calls. + slow_draw_calls_count: u64, + /// Slow renders with a high number of render targets. + slow_targets_count: u64, + /// Slow uploads with a high number of blob tiles. + slow_blob_count: u64, + ui: Vec<Item>, } @@ -433,6 +461,15 @@ impl Profiler { int("Textures created", "", TEXTURES_CREATED, expected(0..5)), int("Textures deleted", "", TEXTURES_DELETED, Expected::none()), + + int("Total slow frames CPU", "", SLOW_FRAME_CPU_COUNT, Expected::none()), + int("Total slow frames GPU", "", SLOW_FRAME_GPU_COUNT, Expected::none()), + int("Slow: frame build", "%", SLOW_FRAME_BUILD_COUNT, Expected::none()), + int("Slow: upload", "%", SLOW_UPLOAD_COUNT, Expected::none()), + int("Slow: render", "%", SLOW_RENDER_COUNT, Expected::none()), + int("Slow: draw calls", "%", SLOW_DRAW_CALLS_COUNT, Expected::none()), + int("Slow: targets", "%", SLOW_TARGETS_COUNT, Expected::none()), + int("Slow: blobs", "%", SLOW_BLOB_COUNT, Expected::none()), ]; let mut counters = Vec::with_capacity(profile_counters.len()); @@ -452,6 +489,16 @@ impl Profiler { num_graph_samples: 500, // Would it be useful to control this via a pref? frame_timestamps_within_last_second: Vec::new(), + + slow_frame_cpu_count: 0, + slow_frame_gpu_count: 0, + slow_frame_build_count: 0, + slow_render_count: 0, + slow_upload_count: 0, + slow_draw_calls_count: 0, + slow_targets_count: 0, + slow_blob_count: 0, + ui: Vec::new(), } } @@ -460,7 +507,7 @@ impl Profiler { /// a specific counter. /// /// This is useful to monitor slow frame and slow transactions. - fn update_slow_event(&mut self, dst_counter: usize, counters: &[usize], threshold: f64) { + fn update_slow_event(&mut self, dst_counter: usize, counters: &[usize], threshold: f64) -> bool { let mut total = 0.0; for &counter in counters { if self.counters[counter].value.is_finite() { @@ -470,6 +517,61 @@ impl Profiler { if total > threshold { self.counters[dst_counter].set(total); + return true; + } + + false + } + + fn classify_slow_cpu_frame(&mut self) { + let is_apz = self.counters[RENDER_REASON_ANIMATED_PROPERTY].value > 0.5 + || self.counters[RENDER_REASON_APZ].value > 0.5; + + if !is_apz { + // Only consider slow frames affecting scrolling for now. + return; + } + + let frame_build = self.counters[FRAME_BUILDING_TIME].value; + let uploads = self.counters[TEXTURE_CACHE_UPDATE_TIME].value; + let renderer = self.counters[RENDERER_TIME].value - uploads; + let mut reasons = [ + (frame_build, &mut self.slow_frame_build_count, SLOW_FRAME_BUILD_COUNT,), + (renderer, &mut self.slow_render_count, SLOW_RENDER_COUNT,), + (uploads, &mut self.slow_upload_count, SLOW_UPLOAD_COUNT,), + ]; + + reasons.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap()); + + *reasons[0].1 += 1; + let reason = reasons[0].2; + std::mem::drop(reasons); + + self.slow_frame_cpu_count += 1; + + if reason == SLOW_RENDER_COUNT { + let draw_calls = self.counters[DRAW_CALLS].value; + if draw_calls > 200.0 { + self.slow_draw_calls_count += 1; + } + + let render_passes = self.counters[COLOR_PASSES].value + self.counters[ALPHA_PASSES].value; + if render_passes > 20.0 { + self.slow_targets_count += 1; + } + } + + if reason == SLOW_UPLOAD_COUNT { + let count = self.counters[TEXTURE_UPLOADS].value; + let blob_tiles = self.counters[RASTERIZED_BLOB_TILES].value; + // This is an approximation: we rasterize blobs for the whole displayport and + // only upload blob tiles for the current viewport. 
That said, the presence of + // a high number of blob tiles compared to the total number of uploads is still + // a good indication that blob images are the likely cause of the slow upload + // time, or at least contributing to it to a large extent. + if blob_tiles > count * 0.5 { + self.slow_blob_count += 1; + } } } @@ -484,7 +586,7 @@ impl Profiler { self.frame_timestamps_within_last_second.retain(|t| *t > one_second_ago); self.frame_timestamps_within_last_second.push(now); - self.update_slow_event( + let slow_cpu = self.update_slow_event( SLOW_FRAME, &[TOTAL_FRAME_CPU_TIME], 15.0, @@ -495,6 +597,20 @@ impl Profiler { 80.0 ); + if slow_cpu { + self.classify_slow_cpu_frame(); + } + + let div = 100.0 / self.slow_frame_cpu_count as f64; + self.counters[SLOW_FRAME_CPU_COUNT].set(self.slow_frame_cpu_count as f64); + self.counters[SLOW_FRAME_GPU_COUNT].set(self.slow_frame_gpu_count as f64); + self.counters[SLOW_FRAME_BUILD_COUNT].set(self.slow_frame_build_count as f64 * div); + self.counters[SLOW_RENDER_COUNT].set(self.slow_render_count as f64 * div); + self.counters[SLOW_UPLOAD_COUNT].set(self.slow_upload_count as f64 * div); + self.counters[SLOW_DRAW_CALLS_COUNT].set(self.slow_draw_calls_count as f64 * div); + self.counters[SLOW_TARGETS_COUNT].set(self.slow_targets_count as f64 * div); + self.counters[SLOW_BLOB_COUNT].set(self.slow_blob_count as f64 * div); + for counter in &mut self.counters { counter.update(update_avg); } @@ -517,7 +633,11 @@ impl Profiler { samples: gpu_queries }); - self.counters[GPU_TIME].set_f64(ns_to_ms(gpu_time_ns)); + let gpu_time = ns_to_ms(gpu_time_ns); + self.counters[GPU_TIME].set_f64(gpu_time); + if gpu_time > 12.0 { + self.slow_frame_gpu_count += 1; + } } // Find the index of a counter by its name. diff --git a/gfx/wr/webrender/src/quad.rs b/gfx/wr/webrender/src/quad.rs new file mode 100644 index 0000000000..5455611f3f --- /dev/null +++ b/gfx/wr/webrender/src/quad.rs @@ -0,0 +1,695 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use api::{units::*, PremultipliedColorF, ClipMode}; +use euclid::point2; + +use crate::batch::{BatchKey, BatchKind, BatchTextures}; +use crate::clip::{ClipChainInstance, ClipIntern, ClipItemKind, ClipStore}; +use crate::command_buffer::{CommandBufferIndex, PrimitiveCommand, QuadFlags}; +use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureContext, PictureState}; +use crate::gpu_types::{PrimitiveInstanceData, QuadInstance, QuadSegment, TransformPaletteId, ZBufferId}; +use crate::intern::DataStore; +use crate::internal_types::TextureSource; +use crate::pattern::{Pattern, PatternKind, PatternShaderInput}; +use crate::prim_store::{PrimitiveInstanceIndex, PrimitiveScratchBuffer}; +use crate::render_task::{MaskSubPass, RenderTask, RenderTaskAddress, RenderTaskKind, SubPass}; +use crate::render_task_graph::{RenderTaskGraph, RenderTaskId}; +use crate::renderer::{BlendMode, GpuBufferAddress, GpuBufferBuilder, GpuBufferBuilderF}; +use crate::segment::EdgeAaSegmentMask; +use crate::space::SpaceMapper; +use crate::spatial_tree::{SpatialNodeIndex, SpatialTree}; +use crate::util::MaxRect; + +const MIN_AA_SEGMENTS_SIZE: f32 = 4.0; +const MIN_QUAD_SPLIT_SIZE: f32 = 256.0; + +/// Describes how clipping affects the rendering of a quad primitive. 
+/// +/// As a general rule, parts of the quad that require masking are prerendered in an +/// intermediate target and the mask is applied using multiplicative blending to +/// the intermediate result before compositing it into the destination target. +/// +/// Each segment can opt in or out of masking independently. +#[derive(Debug, Copy, Clone)] +enum QuadRenderStrategy { + /// The quad is not affected by any mask and is drawn directly in the destination + /// target. + Direct, + /// The quad is drawn entirely in an intermediate target and a mask is applied + /// before compositing in the destination target. + Indirect, + /// A rounded rectangle clip is applied to the quad primitive via a nine-patch. + /// The segments of the nine-patch that require a mask are rendered and masked in + /// an intermediate target, while other segments are drawn directly in the destination + /// target. + NinePatch { + radius: LayoutVector2D, + clip_rect: LayoutRect, + }, + /// Split the primitive into coarse tiles so that each tile independently + /// has the opportunity to be drawn directly in the destination target or + /// via an intermediate target if it is affected by a mask. + Tiled { + x_tiles: u16, + y_tiles: u16, + } +} + +pub fn push_quad( + pattern: &Pattern, + local_rect: &LayoutRect, + prim_instance_index: PrimitiveInstanceIndex, + prim_spatial_node_index: SpatialNodeIndex, + clip_chain: &ClipChainInstance, + device_pixel_scale: DevicePixelScale, + + frame_context: &FrameBuildingContext, + pic_context: &PictureContext, + targets: &[CommandBufferIndex], + interned_clips: &DataStore<ClipIntern>, + + frame_state: &mut FrameBuildingState, + pic_state: &mut PictureState, + scratch: &mut PrimitiveScratchBuffer, + +) { + let map_prim_to_surface = frame_context.spatial_tree.get_relative_transform( + prim_spatial_node_index, + pic_context.raster_spatial_node_index, + ); + let prim_is_2d_scale_translation = map_prim_to_surface.is_2d_scale_translation(); + let prim_is_2d_axis_aligned = map_prim_to_surface.is_2d_axis_aligned(); + + let strategy = get_prim_render_strategy( + prim_spatial_node_index, + clip_chain, + frame_state.clip_store, + interned_clips, + prim_is_2d_scale_translation, + frame_context.spatial_tree, + ); + + let mut quad_flags = QuadFlags::empty(); + + // Only use AA edge instances if the primitive is large enough to require it + let prim_size = local_rect.size(); + if prim_size.width > MIN_AA_SEGMENTS_SIZE && prim_size.height > MIN_AA_SEGMENTS_SIZE { + quad_flags |= QuadFlags::USE_AA_SEGMENTS; + } + + if pattern.is_opaque { + quad_flags |= QuadFlags::IS_OPAQUE; + } + let needs_scissor = !prim_is_2d_scale_translation; + if !needs_scissor { + quad_flags |= QuadFlags::APPLY_DEVICE_CLIP; + } + + // TODO(gw): For now, we don't select per-edge AA at all if the primitive + // has a 2d transform, which matches existing behavior. However, + // as a follow up, we can now easily check if we have a 2d-aligned + // primitive on a subpixel boundary, and enable AA along those edge(s). + let aa_flags = if prim_is_2d_axis_aligned { + EdgeAaSegmentMask::empty() + } else { + EdgeAaSegmentMask::all() + }; + + let transform_id = frame_state.transforms.get_id( + prim_spatial_node_index, + pic_context.raster_spatial_node_index, + frame_context.spatial_tree, + ); + + // TODO(gw): Perhaps rather than writing untyped data here (we at least do validate + // the written block count) to gpu-buffer, we could add a trait for + // writing typed data? 
+ let main_prim_address = write_prim_blocks( + &mut frame_state.frame_gpu_data.f32, + *local_rect, + clip_chain.local_clip_rect, + pattern.base_color, + &[], + ); + + if let QuadRenderStrategy::Direct = strategy { + frame_state.push_prim( + &PrimitiveCommand::quad( + pattern.kind, + pattern.shader_input, + prim_instance_index, + main_prim_address, + transform_id, + quad_flags, + aa_flags, + ), + prim_spatial_node_index, + targets, + ); + return; + } + + let surface = &frame_state.surfaces[pic_context.surface_index.0]; + let Some(clipped_surface_rect) = surface.get_surface_rect( + &clip_chain.pic_coverage_rect, frame_context.spatial_tree + ) else { + return; + }; + + match strategy { + QuadRenderStrategy::Direct => {} + QuadRenderStrategy::Indirect => { + let segment = add_segment( + pattern, + &clipped_surface_rect, + true, + clip_chain, + prim_spatial_node_index, + pic_context.raster_spatial_node_index, + main_prim_address, + transform_id, + aa_flags, + quad_flags, + device_pixel_scale, + needs_scissor, + frame_state, + ); + + add_composite_prim( + pattern, + prim_instance_index, + segment.rect, + quad_flags, + frame_state, + targets, + &[segment], + ); + } + QuadRenderStrategy::Tiled { x_tiles, y_tiles } => { + let unclipped_surface_rect = surface + .map_to_device_rect(&clip_chain.pic_coverage_rect, frame_context.spatial_tree); + + scratch.quad_segments.clear(); + + let mut x_coords = vec![clipped_surface_rect.min.x]; + let mut y_coords = vec![clipped_surface_rect.min.y]; + + let dx = (clipped_surface_rect.max.x - clipped_surface_rect.min.x) as f32 / x_tiles as f32; + let dy = (clipped_surface_rect.max.y - clipped_surface_rect.min.y) as f32 / y_tiles as f32; + + for x in 1 .. (x_tiles as i32) { + x_coords.push((clipped_surface_rect.min.x as f32 + x as f32 * dx).round() as i32); + } + for y in 1 .. (y_tiles as i32) { + y_coords.push((clipped_surface_rect.min.y as f32 + y as f32 * dy).round() as i32); + } + + x_coords.push(clipped_surface_rect.max.x); + y_coords.push(clipped_surface_rect.max.y); + + for y in 0 .. y_coords.len()-1 { + let y0 = y_coords[y]; + let y1 = y_coords[y+1]; + + if y1 <= y0 { + continue; + } + + for x in 0 .. 
x_coords.len()-1 { + let x0 = x_coords[x]; + let x1 = x_coords[x+1]; + + if x1 <= x0 { + continue; + } + + let create_task = true; + let rect = DeviceIntRect { + min: point2(x0, y0), + max: point2(x1, y1), + }; + + let segment = add_segment( + pattern, + &rect, + create_task, + clip_chain, + prim_spatial_node_index, + pic_context.raster_spatial_node_index, + main_prim_address, + transform_id, + aa_flags, + quad_flags, + device_pixel_scale, + needs_scissor, + frame_state, + ); + scratch.quad_segments.push(segment); + } + } + + add_composite_prim( + pattern, + prim_instance_index, + unclipped_surface_rect.cast_unit(), + quad_flags, + frame_state, + targets, + &scratch.quad_segments, + ); + } + QuadRenderStrategy::NinePatch { clip_rect, radius } => { + let unclipped_surface_rect = surface + .map_to_device_rect(&clip_chain.pic_coverage_rect, frame_context.spatial_tree); + + let local_corner_0 = LayoutRect::new( + clip_rect.min, + clip_rect.min + radius, + ); + + let local_corner_1 = LayoutRect::new( + clip_rect.max - radius, + clip_rect.max, + ); + + let pic_corner_0 = pic_state.map_local_to_pic.map(&local_corner_0).unwrap(); + let pic_corner_1 = pic_state.map_local_to_pic.map(&local_corner_1).unwrap(); + + let surface_rect_0 = surface.map_to_device_rect( + &pic_corner_0, + frame_context.spatial_tree, + ).round_out().to_i32(); + + let surface_rect_1 = surface.map_to_device_rect( + &pic_corner_1, + frame_context.spatial_tree, + ).round_out().to_i32(); + + let p0 = surface_rect_0.min; + let p1 = surface_rect_0.max; + let p2 = surface_rect_1.min; + let p3 = surface_rect_1.max; + + let mut x_coords = [p0.x, p1.x, p2.x, p3.x]; + let mut y_coords = [p0.y, p1.y, p2.y, p3.y]; + + x_coords.sort_by(|a, b| a.partial_cmp(b).unwrap()); + y_coords.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + scratch.quad_segments.clear(); + + for y in 0 .. y_coords.len()-1 { + let y0 = y_coords[y]; + let y1 = y_coords[y+1]; + + if y1 <= y0 { + continue; + } + + for x in 0 .. x_coords.len()-1 { + let x0 = x_coords[x]; + let x1 = x_coords[x+1]; + + if x1 <= x0 { + continue; + } + + // Only create render tasks for the corners. 
+ let create_task = x != 1 && y != 1; + + let rect = DeviceIntRect::new(point2(x0, y0), point2(x1, y1)); + + let rect = match rect.intersection(&clipped_surface_rect) { + Some(rect) => rect, + None => { + continue; + } + }; + + let segment = add_segment( + pattern, + &rect, + create_task, + clip_chain, + prim_spatial_node_index, + pic_context.raster_spatial_node_index, + main_prim_address, + transform_id, + aa_flags, + quad_flags, + device_pixel_scale, + false, + frame_state, + ); + scratch.quad_segments.push(segment); + } + } + + add_composite_prim( + pattern, + prim_instance_index, + unclipped_surface_rect.cast_unit(), + quad_flags, + frame_state, + targets, + &scratch.quad_segments, + ); + } + } +} + +fn get_prim_render_strategy( + prim_spatial_node_index: SpatialNodeIndex, + clip_chain: &ClipChainInstance, + clip_store: &ClipStore, + interned_clips: &DataStore<ClipIntern>, + can_use_nine_patch: bool, + spatial_tree: &SpatialTree, +) -> QuadRenderStrategy { + if !clip_chain.needs_mask { + return QuadRenderStrategy::Direct + } + + fn tile_count_for_size(size: f32) -> u16 { + (size / MIN_QUAD_SPLIT_SIZE).min(4.0).max(1.0).ceil() as u16 + } + + let prim_coverage_size = clip_chain.pic_coverage_rect.size(); + let x_tiles = tile_count_for_size(prim_coverage_size.width); + let y_tiles = tile_count_for_size(prim_coverage_size.height); + let try_split_prim = x_tiles > 1 || y_tiles > 1; + + if !try_split_prim { + return QuadRenderStrategy::Indirect; + } + + if can_use_nine_patch && clip_chain.clips_range.count == 1 { + let clip_instance = clip_store.get_instance_from_range(&clip_chain.clips_range, 0); + let clip_node = &interned_clips[clip_instance.handle]; + + if let ClipItemKind::RoundedRectangle { ref radius, mode: ClipMode::Clip, rect, .. } = clip_node.item.kind { + let max_corner_width = radius.top_left.width + .max(radius.bottom_left.width) + .max(radius.top_right.width) + .max(radius.bottom_right.width); + let max_corner_height = radius.top_left.height + .max(radius.bottom_left.height) + .max(radius.top_right.height) + .max(radius.bottom_right.height); + + if max_corner_width <= 0.5 * rect.size().width && + max_corner_height <= 0.5 * rect.size().height { + + let clip_prim_coords_match = spatial_tree.is_matching_coord_system( + prim_spatial_node_index, + clip_node.item.spatial_node_index, + ); + + if clip_prim_coords_match { + let map_clip_to_prim = SpaceMapper::new_with_target( + prim_spatial_node_index, + clip_node.item.spatial_node_index, + LayoutRect::max_rect(), + spatial_tree, + ); + + if let Some(rect) = map_clip_to_prim.map(&rect) { + return QuadRenderStrategy::NinePatch { + radius: LayoutVector2D::new(max_corner_width, max_corner_height), + clip_rect: rect, + }; + } + } + } + } + } + + QuadRenderStrategy::Tiled { + x_tiles, + y_tiles, + } +} + +fn add_segment( + pattern: &Pattern, + rect: &DeviceIntRect, + create_task: bool, + clip_chain: &ClipChainInstance, + prim_spatial_node_index: SpatialNodeIndex, + raster_spatial_node_index: SpatialNodeIndex, + prim_address_f: GpuBufferAddress, + transform_id: TransformPaletteId, + aa_flags: EdgeAaSegmentMask, + quad_flags: QuadFlags, + device_pixel_scale: DevicePixelScale, + needs_scissor_rect: bool, + frame_state: &mut FrameBuildingState, +) -> QuadSegment { + let task_size = rect.size(); + let rect = rect.to_f32(); + let content_origin = rect.min; + + let task_id = if create_task { + let task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + task_size, + RenderTaskKind::new_prim( + pattern.kind, + pattern.shader_input, + 
prim_spatial_node_index, + raster_spatial_node_index, + device_pixel_scale, + content_origin, + prim_address_f, + transform_id, + aa_flags, + quad_flags, + clip_chain.clips_range, + needs_scissor_rect, + ), + )); + + let masks = MaskSubPass { + clip_node_range: clip_chain.clips_range, + prim_spatial_node_index, + prim_address_f, + }; + + let task = frame_state.rg_builder.get_task_mut(task_id); + task.add_sub_pass(SubPass::Masks { masks }); + + frame_state + .surface_builder + .add_child_render_task(task_id, frame_state.rg_builder); + + task_id + } else { + RenderTaskId::INVALID + }; + + QuadSegment { rect: rect.cast_unit(), task_id } +} + +fn add_composite_prim( + pattern: &Pattern, + prim_instance_index: PrimitiveInstanceIndex, + rect: LayoutRect, + quad_flags: QuadFlags, + frame_state: &mut FrameBuildingState, + targets: &[CommandBufferIndex], + segments: &[QuadSegment], +) { + let composite_prim_address = write_prim_blocks( + &mut frame_state.frame_gpu_data.f32, + rect, + rect, + pattern.base_color, + segments, + ); + + frame_state.set_segments(segments, targets); + + let mut composite_quad_flags = + QuadFlags::IGNORE_DEVICE_PIXEL_SCALE | QuadFlags::APPLY_DEVICE_CLIP; + if quad_flags.contains(QuadFlags::IS_OPAQUE) { + composite_quad_flags |= QuadFlags::IS_OPAQUE; + } + + frame_state.push_cmd( + &PrimitiveCommand::quad( + PatternKind::ColorOrTexture, + pattern.shader_input, + prim_instance_index, + composite_prim_address, + TransformPaletteId::IDENTITY, + composite_quad_flags, + // TODO(gw): No AA on composite, unless we use it to apply 2d clips + EdgeAaSegmentMask::empty(), + ), + targets, + ); +} + +pub fn write_prim_blocks( + builder: &mut GpuBufferBuilderF, + prim_rect: LayoutRect, + clip_rect: LayoutRect, + color: PremultipliedColorF, + segments: &[QuadSegment], +) -> GpuBufferAddress { + let mut writer = builder.write_blocks(3 + segments.len() * 2); + + writer.push_one(prim_rect); + writer.push_one(clip_rect); + writer.push_one(color); + + for segment in segments { + writer.push_one(segment.rect); + match segment.task_id { + RenderTaskId::INVALID => { + writer.push_one([0.0; 4]); + } + task_id => { + writer.push_render_task(task_id); + } + } + } + + writer.finish() +} + +pub fn add_to_batch<F>( + kind: PatternKind, + pattern_input: PatternShaderInput, + render_task_address: RenderTaskAddress, + transform_id: TransformPaletteId, + prim_address_f: GpuBufferAddress, + quad_flags: QuadFlags, + edge_flags: EdgeAaSegmentMask, + segment_index: u8, + task_id: RenderTaskId, + z_id: ZBufferId, + render_tasks: &RenderTaskGraph, + gpu_buffer_builder: &mut GpuBufferBuilder, + mut f: F, +) where F: FnMut(BatchKey, PrimitiveInstanceData) { + + // See the corresponfing #defines in ps_quad.glsl + #[repr(u8)] + enum PartIndex { + Center = 0, + Left = 1, + Top = 2, + Right = 3, + Bottom = 4, + All = 5, + } + + // See QuadHeader in ps_quad.glsl + let mut writer = gpu_buffer_builder.i32.write_blocks(1); + writer.push_one([ + transform_id.0 as i32, + z_id.0, + pattern_input.0, + pattern_input.1, + ]); + let prim_address_i = writer.finish(); + + let texture = match task_id { + RenderTaskId::INVALID => { + TextureSource::Invalid + } + _ => { + let texture = render_tasks + .resolve_texture(task_id) + .expect("bug: valid task id must be resolvable"); + + texture + } + }; + + let textures = BatchTextures::prim_textured( + texture, + TextureSource::Invalid, + ); + + let default_blend_mode = if quad_flags.contains(QuadFlags::IS_OPAQUE) && task_id == RenderTaskId::INVALID { + BlendMode::None + } else { + 
BlendMode::PremultipliedAlpha + }; + + let edge_flags_bits = edge_flags.bits(); + + let prim_batch_key = BatchKey { + blend_mode: default_blend_mode, + kind: BatchKind::Quad(kind), + textures, + }; + + let aa_batch_key = BatchKey { + blend_mode: BlendMode::PremultipliedAlpha, + kind: BatchKind::Quad(kind), + textures, + }; + + let mut instance = QuadInstance { + render_task_address, + prim_address_i, + prim_address_f, + z_id, + transform_id, + edge_flags: edge_flags_bits, + quad_flags: quad_flags.bits(), + part_index: PartIndex::All as u8, + segment_index, + }; + + if edge_flags.is_empty() { + // No antialisaing. + f(prim_batch_key, instance.into()); + } else if quad_flags.contains(QuadFlags::USE_AA_SEGMENTS) { + // Add instances for the antialisaing. This gives the center part + // an opportunity to stay in the opaque pass. + if edge_flags.contains(EdgeAaSegmentMask::LEFT) { + let instance = QuadInstance { + part_index: PartIndex::Left as u8, + ..instance + }; + f(aa_batch_key, instance.into()); + } + if edge_flags.contains(EdgeAaSegmentMask::RIGHT) { + let instance = QuadInstance { + part_index: PartIndex::Top as u8, + ..instance + }; + f(aa_batch_key, instance.into()); + } + if edge_flags.contains(EdgeAaSegmentMask::TOP) { + let instance = QuadInstance { + part_index: PartIndex::Right as u8, + ..instance + }; + f(aa_batch_key, instance.into()); + } + if edge_flags.contains(EdgeAaSegmentMask::BOTTOM) { + let instance = QuadInstance { + part_index: PartIndex::Bottom as u8, + ..instance + }; + f(aa_batch_key, instance.into()); + } + + instance = QuadInstance { + part_index: PartIndex::Center as u8, + ..instance + }; + + f(prim_batch_key, instance.into()); + } else { + // Render the anti-aliased quad with a single primitive. + f(aa_batch_key, instance.into()); + } +} + diff --git a/gfx/wr/webrender/src/render_backend.rs b/gfx/wr/webrender/src/render_backend.rs index ff4de9e82e..45ca2179d7 100644 --- a/gfx/wr/webrender/src/render_backend.rs +++ b/gfx/wr/webrender/src/render_backend.rs @@ -545,7 +545,6 @@ impl Document { self.frame_is_valid = true; self.dirty_rects_are_valid = true; - let is_new_scene = self.has_built_scene; self.has_built_scene = false; let frame_build_time_ms = @@ -559,7 +558,6 @@ impl Document { RenderedDocument { frame, - is_new_scene, profile: self.profile.take_and_reset(), frame_stats: frame_stats, render_reasons, @@ -1918,7 +1916,6 @@ impl RenderBackend { id, RenderedDocument { frame, - is_new_scene: true, profile: TransactionProfile::new(), render_reasons: RenderReasons::empty(), frame_stats: None, diff --git a/gfx/wr/webrender/src/render_target.rs b/gfx/wr/webrender/src/render_target.rs index f2d1c24c10..f53b5dd4f8 100644 --- a/gfx/wr/webrender/src/render_target.rs +++ b/gfx/wr/webrender/src/render_target.rs @@ -5,9 +5,10 @@ use api::{units::*, PremultipliedColorF, ClipMode}; use api::{ColorF, ImageFormat, LineOrientation, BorderStyle}; -use crate::batch::{AlphaBatchBuilder, AlphaBatchContainer, BatchTextures, add_quad_to_batch}; +use crate::batch::{AlphaBatchBuilder, AlphaBatchContainer, BatchTextures}; use crate::batch::{ClipBatcher, BatchBuilder, INVALID_SEGMENT_INDEX, ClipMaskInstanceList}; use crate::command_buffer::{CommandBufferList, QuadFlags}; +use crate::pattern::{PatternKind, PatternShaderInput}; use crate::segment::EdgeAaSegmentMask; use crate::spatial_tree::SpatialTree; use crate::clip::{ClipStore, ClipItemKind}; @@ -18,7 +19,7 @@ use crate::gpu_types::{TransformPalette, ZBufferIdGenerator, MaskInstance, ClipS use crate::gpu_types::{ZBufferId, 
QuadSegment, PrimitiveInstanceData, TransformPaletteId}; use crate::internal_types::{FastHashMap, TextureSource, CacheTextureId}; use crate::picture::{SliceId, SurfaceInfo, ResolvedSurfaceTexture, TileCacheInstance}; -use crate::prepare::write_prim_blocks; +use crate::quad; use crate::prim_store::{PrimitiveInstance, PrimitiveStore, PrimitiveScratchBuffer}; use crate::prim_store::gradient::{ FastLinearGradientInstance, LinearGradientInstance, RadialGradientInstance, @@ -210,6 +211,7 @@ impl<T: RenderTarget> RenderTargetList<T> { } } +const NUM_PATTERNS: usize = crate::pattern::NUM_PATTERNS as usize; /// Contains the work (in the form of instance arrays) needed to fill a color /// color output surface (RGBA8). @@ -235,10 +237,10 @@ pub struct ColorRenderTarget { pub resolve_ops: Vec<ResolveOp>, pub clear_color: Option<ColorF>, - pub prim_instances: Vec<PrimitiveInstanceData>, - pub prim_instances_with_scissor: FastHashMap<DeviceIntRect, Vec<PrimitiveInstanceData>>, - - pub clip_masks: Vec<ClipMaskInstanceList>, + pub prim_instances: [Vec<PrimitiveInstanceData>; NUM_PATTERNS], + pub prim_instances_with_scissor: FastHashMap<(DeviceIntRect, PatternKind), Vec<PrimitiveInstanceData>>, + + pub clip_masks: ClipMaskInstanceList, } impl RenderTarget for ColorRenderTarget { @@ -261,9 +263,9 @@ impl RenderTarget for ColorRenderTarget { used_rect, resolve_ops: Vec::new(), clear_color: Some(ColorF::TRANSPARENT), - prim_instances: Vec::new(), + prim_instances: [Vec::new(), Vec::new()], prim_instances_with_scissor: FastHashMap::default(), - clip_masks: Vec::new(), + clip_masks: ClipMaskInstanceList::new(), } } @@ -376,7 +378,9 @@ impl RenderTarget for ColorRenderTarget { let render_task_address = task_id.into(); let target_rect = task.get_target_rect(); - add_quad_to_batch( + quad::add_to_batch( + info.pattern, + info.pattern_input, render_task_address, info.transform_id, info.prim_address_f, @@ -390,11 +394,11 @@ impl RenderTarget for ColorRenderTarget { |_, instance| { if info.prim_needs_scissor_rect { self.prim_instances_with_scissor - .entry(target_rect) + .entry((target_rect, info.pattern)) .or_insert(Vec::new()) .push(instance); } else { - self.prim_instances.push(instance); + self.prim_instances[info.pattern as usize].push(instance); } } ); @@ -501,7 +505,7 @@ pub struct AlphaRenderTarget { pub zero_clears: Vec<RenderTaskId>, pub one_clears: Vec<RenderTaskId>, pub texture_id: CacheTextureId, - pub clip_masks: Vec<ClipMaskInstanceList>, + pub clip_masks: ClipMaskInstanceList, } impl RenderTarget for AlphaRenderTarget { @@ -519,7 +523,7 @@ impl RenderTarget for AlphaRenderTarget { zero_clears: Vec::new(), one_clears: Vec::new(), texture_id, - clip_masks: Vec::new(), + clip_masks: ClipMaskInstanceList::new(), } } @@ -1047,7 +1051,7 @@ fn build_mask_tasks( } for tile in clip_store.visible_mask_tiles(&clip_instance) { - let clip_prim_address = write_prim_blocks( + let clip_prim_address = quad::write_prim_blocks( &mut gpu_buffer_builder.f32, rect, rect, @@ -1062,7 +1066,9 @@ fn build_mask_tasks( .resolve_texture(tile.task_id) .expect("bug: texture not found for tile"); - add_quad_to_batch( + quad::add_to_batch( + PatternKind::ColorOrTexture, + PatternShaderInput::default(), render_task_address, clip_transform_id, clip_prim_address, @@ -1112,7 +1118,7 @@ fn build_mask_tasks( spatial_tree, ); - let main_prim_address = write_prim_blocks( + let main_prim_address = quad::write_prim_blocks( &mut gpu_buffer_builder.f32, task_world_rect.cast_unit(), task_world_rect.cast_unit(), @@ -1158,7 +1164,9 @@ fn 
build_mask_tasks( QuadFlags::empty() }; - add_quad_to_batch( + quad::add_to_batch( + PatternKind::Mask, + PatternShaderInput::default(), render_task_address, prim_transform_id, main_prim_address, @@ -1209,7 +1217,7 @@ fn build_sub_pass( render_tasks: &RenderTaskGraph, transforms: &mut TransformPalette, ctx: &RenderTargetContext, - output: &mut Vec<ClipMaskInstanceList>, + output: &mut ClipMaskInstanceList, ) { if let Some(ref sub_pass) = task.sub_pass { match sub_pass { @@ -1235,8 +1243,6 @@ fn build_sub_pass( content_origin + target_rect.size().to_f32(), ); - let mut clip_masks = ClipMaskInstanceList::new(); - build_mask_tasks( masks, render_task_address, @@ -1251,10 +1257,8 @@ fn build_sub_pass( gpu_buffer_builder, transforms, render_tasks, - &mut clip_masks, + output, ); - - output.push(clip_masks); } } } diff --git a/gfx/wr/webrender/src/render_task.rs b/gfx/wr/webrender/src/render_task.rs index 8889ae1ea6..bf9050712c 100644 --- a/gfx/wr/webrender/src/render_task.rs +++ b/gfx/wr/webrender/src/render_task.rs @@ -8,6 +8,7 @@ use api::MAX_RENDER_TASK_SIZE; use api::units::*; use crate::clip::{ClipDataStore, ClipItemKind, ClipStore, ClipNodeRange}; use crate::command_buffer::{CommandBufferIndex, QuadFlags}; +use crate::pattern::{PatternKind, PatternShaderInput}; use crate::spatial_tree::SpatialNodeIndex; use crate::filterdata::SFilterData; use crate::frame_builder::FrameBuilderConfig; @@ -184,6 +185,8 @@ pub struct EmptyTask { #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct PrimTask { + pub pattern: PatternKind, + pub pattern_input: PatternShaderInput, pub device_pixel_scale: DevicePixelScale, pub content_origin: DevicePoint, pub prim_address_f: GpuBufferAddress, @@ -516,6 +519,8 @@ impl RenderTaskKind { } pub fn new_prim( + pattern: PatternKind, + pattern_input: PatternShaderInput, prim_spatial_node_index: SpatialNodeIndex, raster_spatial_node_index: SpatialNodeIndex, device_pixel_scale: DevicePixelScale, @@ -528,6 +533,8 @@ impl RenderTaskKind { prim_needs_scissor_rect: bool, ) -> Self { RenderTaskKind::Prim(PrimTask { + pattern, + pattern_input, prim_spatial_node_index, raster_spatial_node_index, device_pixel_scale, @@ -940,9 +947,7 @@ impl RenderTask { size: DeviceIntSize, kind: RenderTaskKind, ) -> Self { - if size.is_empty() { - log::warn!("Bad {} render task size: {:?}", kind.as_str(), size); - } + assert!(!size.is_empty(), "Bad {} render task size: {:?}", kind.as_str(), size); RenderTask::new( RenderTaskLocation::Unallocated { size }, kind, diff --git a/gfx/wr/webrender/src/renderer/mod.rs b/gfx/wr/webrender/src/renderer/mod.rs index 3a058ae8f4..a70d3eca18 100644 --- a/gfx/wr/webrender/src/renderer/mod.rs +++ b/gfx/wr/webrender/src/renderer/mod.rs @@ -48,6 +48,7 @@ use api::channel::{Sender, Receiver}; pub use api::DebugFlags; use core::time::Duration; +use crate::pattern::PatternKind; use crate::render_api::{DebugCommand, ApiMsg, MemoryReport}; use crate::batch::{AlphaBatchContainer, BatchKind, BatchFeatures, BatchTextures, BrushBatchKind, ClipBatchList}; use crate::batch::{ClipMaskInstanceList}; @@ -286,7 +287,8 @@ impl BatchKind { } } BatchKind::TextRun(_) => GPU_TAG_PRIM_TEXT_RUN, - BatchKind::Primitive => GPU_TAG_PRIMITIVE, + BatchKind::Quad(PatternKind::ColorOrTexture) => GPU_TAG_PRIMITIVE, + BatchKind::Quad(PatternKind::Mask) => GPU_TAG_INDIRECT_MASK, } } } @@ -2171,8 +2173,8 @@ impl Renderer { fn handle_prims( &mut self, draw_target: &DrawTarget, - prim_instances: &[PrimitiveInstanceData], - 
prim_instances_with_scissor: &FastHashMap<DeviceIntRect, Vec<PrimitiveInstanceData>>, + prim_instances: &[Vec<PrimitiveInstanceData>], + prim_instances_with_scissor: &FastHashMap<(DeviceIntRect, PatternKind), Vec<PrimitiveInstanceData>>, projection: &default::Transform3D<f32>, stats: &mut RendererStats, ) { @@ -2181,10 +2183,17 @@ impl Renderer { { let _timer = self.gpu_profiler.start_timer(GPU_TAG_INDIRECT_PRIM); - if !prim_instances.is_empty() { + if prim_instances.iter().any(|instances| !instances.is_empty()) { self.set_blend(false, FramebufferKind::Other); + } - self.shaders.borrow_mut().ps_quad_textured.bind( + for (pattern_idx, prim_instances) in prim_instances.iter().enumerate() { + if prim_instances.is_empty() { + continue; + } + let pattern = PatternKind::from_u32(pattern_idx as u32); + + self.shaders.borrow_mut().get_quad_shader(pattern).bind( &mut self.device, projection, None, @@ -2192,10 +2201,13 @@ impl Renderer { &mut self.profile, ); + // TODO: Some patterns will need to be able to sample textures. + let texture_bindings = BatchTextures::empty(); + self.draw_instanced_batch( prim_instances, VertexArrayKind::Primitive, - &BatchTextures::empty(), + &texture_bindings, stats, ); } @@ -2205,17 +2217,22 @@ impl Renderer { self.device.set_blend_mode_premultiplied_alpha(); self.device.enable_scissor(); - self.shaders.borrow_mut().ps_quad_textured.bind( - &mut self.device, - projection, - None, - &mut self.renderer_errors, - &mut self.profile, - ); + let mut prev_pattern = None; - for (scissor_rect, prim_instances) in prim_instances_with_scissor { - self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect)); + for ((scissor_rect, pattern), prim_instances) in prim_instances_with_scissor { + if prev_pattern != Some(*pattern) { + prev_pattern = Some(*pattern); + self.shaders.borrow_mut().get_quad_shader(*pattern).bind( + &mut self.device, + projection, + None, + &mut self.renderer_errors, + &mut self.profile, + ); + } + self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect)); + // TODO: hook up the right pattern. 
self.draw_instanced_batch( prim_instances, VertexArrayKind::Primitive, @@ -3578,14 +3595,12 @@ impl Renderer { stats, ); - for clip_masks in &target.clip_masks { - self.handle_clips( - &draw_target, - clip_masks, - projection, - stats, - ); - } + self.handle_clips( + &draw_target, + &target.clip_masks, + projection, + stats, + ); if clear_depth.is_some() { self.device.invalidate_depth_target(); @@ -3840,14 +3855,12 @@ impl Renderer { stats, ); - for clip_masks in &target.clip_masks { - self.handle_clips( - &draw_target, - clip_masks, - projection, - stats, - ); - } + self.handle_clips( + &draw_target, + &target.clip_masks, + projection, + stats, + ); } self.gpu_profiler.finish_sampler(alpha_sampler); diff --git a/gfx/wr/webrender/src/renderer/shade.rs b/gfx/wr/webrender/src/renderer/shade.rs index 777bfab44a..96e8982aa0 100644 --- a/gfx/wr/webrender/src/renderer/shade.rs +++ b/gfx/wr/webrender/src/renderer/shade.rs @@ -6,6 +6,7 @@ use api::{ImageBufferKind, units::DeviceSize}; use crate::batch::{BatchKey, BatchKind, BrushBatchKind, BatchFeatures}; use crate::composite::{CompositeFeatures, CompositeSurfaceFormat}; use crate::device::{Device, Program, ShaderError}; +use crate::pattern::PatternKind; use euclid::default::Transform3D; use glyph_rasterizer::GlyphFormat; use crate::renderer::{ @@ -1153,6 +1154,16 @@ impl Shaders { .expect("bug: unsupported scale shader requested") } + pub fn get_quad_shader( + &mut self, + pattern: PatternKind + ) -> &mut LazilyCompiledShader { + match pattern { + PatternKind::ColorOrTexture => &mut self.ps_quad_textured, + PatternKind::Mask => unreachable!(), + } + } + pub fn get(& mut self, key: &BatchKey, @@ -1161,9 +1172,12 @@ impl Shaders { device: &Device, ) -> &mut LazilyCompiledShader { match key.kind { - BatchKind::Primitive => { + BatchKind::Quad(PatternKind::ColorOrTexture) => { &mut self.ps_quad_textured } + BatchKind::Quad(PatternKind::Mask) => { + unreachable!(); + } BatchKind::SplitComposite => { &mut self.ps_split_composite } diff --git a/gfx/wr/webrender/src/resource_cache.rs b/gfx/wr/webrender/src/resource_cache.rs index 349be25cb8..aee3cbe241 100644 --- a/gfx/wr/webrender/src/resource_cache.rs +++ b/gfx/wr/webrender/src/resource_cache.rs @@ -193,12 +193,6 @@ struct ImageResource { generation: ImageGeneration, } -#[derive(Clone, Debug)] -pub struct ImageTiling { - pub image_size: DeviceIntSize, - pub tile_size: TileSize, -} - #[derive(Default)] struct ImageTemplates { images: FastHashMap<ImageKey, ImageResource>, diff --git a/gfx/wr/webrender/src/texture_pack/mod.rs b/gfx/wr/webrender/src/texture_pack/mod.rs index f89a82b0a1..47de681cbe 100644 --- a/gfx/wr/webrender/src/texture_pack/mod.rs +++ b/gfx/wr/webrender/src/texture_pack/mod.rs @@ -57,8 +57,6 @@ pub trait AtlasAllocatorList<TextureParameters> { fn set_handle(&mut self, texture_id: CacheTextureId, alloc_id: AllocId, handle: &TextureCacheHandle); - fn remove_handle(&mut self, texture_id: CacheTextureId, alloc_id: AllocId); - /// Deallocate a rectangle and return its size. 
fn deallocate(&mut self, texture_id: CacheTextureId, alloc_id: AllocId); @@ -235,14 +233,6 @@ for AllocatorList<Allocator, TextureParameters> { unit.handles.insert(alloc_id, handle.clone()); } - fn remove_handle(&mut self, texture_id: CacheTextureId, alloc_id: AllocId) { - let unit = self.units - .iter_mut() - .find(|unit| unit.texture_id == texture_id) - .expect("Unable to find the associated texture array unit"); - unit.handles.remove(&alloc_id); - } - fn deallocate(&mut self, texture_id: CacheTextureId, alloc_id: AllocId) { self.deallocate(texture_id, alloc_id); } @@ -314,7 +304,6 @@ impl AtlasAllocator for ShelfAllocator { pub struct CompactionChange { pub handle: TextureCacheHandle, - pub old_id: AllocId, pub old_tex: CacheTextureId, pub old_rect: DeviceIntRect, pub new_id: AllocId, @@ -372,7 +361,6 @@ impl<P> AllocatorList<ShelfAllocator, P> { // Record the change so that the texture cache can do additional bookkeeping. changes.push(CompactionChange { handle, - old_id: AllocId(alloc.id.serialize()), old_tex: self.units[last_unit].texture_id, old_rect: alloc.rectangle.cast_unit(), new_id: AllocId(new_alloc.id.serialize()), diff --git a/gfx/wr/webrender/src/util.rs b/gfx/wr/webrender/src/util.rs index 9b16131327..fa02d87b91 100644 --- a/gfx/wr/webrender/src/util.rs +++ b/gfx/wr/webrender/src/util.rs @@ -66,9 +66,6 @@ pub trait VecHelper<T> { /// Equivalent to `mem::replace(&mut vec, Vec::new())` fn take(&mut self) -> Self; - /// Call clear and return self (useful for chaining with calls that move the vector). - fn cleared(self) -> Self; - /// Functionally equivalent to `mem::replace(&mut vec, Vec::new())` but tries /// to keep the allocation in the caller if it is empty or replace it with a /// pre-allocated vector. @@ -102,12 +99,6 @@ impl<T> VecHelper<T> for Vec<T> { replace(self, Vec::new()) } - fn cleared(mut self) -> Self { - self.clear(); - - self - } - fn take_and_preallocate(&mut self) -> Self { let len = self.len(); if len == 0 { @@ -399,10 +390,6 @@ pub trait MatrixHelpers<Src, Dst> { fn is_2d_scale_translation(&self) -> bool; /// Return the determinant of the 2D part of the matrix. fn determinant_2d(&self) -> f32; - /// This function returns a point in the `Src` space that projects into zero XY. - /// It ignores the Z coordinate and is usable for "flattened" transformations, - /// since they are not generally inversible. - fn inverse_project_2d_origin(&self) -> Option<Point2D<f32, Src>>; /// Turn Z transformation into identity. This is useful when crossing "flat" /// transform styled stacking contexts upon traversing the coordinate systems. 
fn flatten_z_output(&mut self); @@ -534,17 +521,6 @@ impl<Src, Dst> MatrixHelpers<Src, Dst> for Transform3D<f32, Src, Dst> { self.m11 * self.m22 - self.m12 * self.m21 } - fn inverse_project_2d_origin(&self) -> Option<Point2D<f32, Src>> { - let det = self.determinant_2d(); - if det != 0.0 { - let x = (self.m21 * self.m42 - self.m41 * self.m22) / det; - let y = (self.m12 * self.m41 - self.m11 * self.m42) / det; - Some(Point2D::new(x, y)) - } else { - None - } - } - fn flatten_z_output(&mut self) { self.m13 = 0.0; self.m23 = 0.0; @@ -623,22 +599,6 @@ impl<U> RectHelpers<U> for Box2D<f32, U> { } } -pub trait VectorHelpers<U> -where - Self: Sized, -{ - fn snap(&self) -> Self; -} - -impl<U> VectorHelpers<U> for Vector2D<f32, U> { - fn snap(&self) -> Self { - Vector2D::new( - (self.x + 0.5).floor(), - (self.y + 0.5).floor(), - ) - } -} - pub fn lerp(a: f32, b: f32, t: f32) -> f32 { (b - a) * t + a } @@ -874,22 +834,6 @@ pub mod test { } #[test] - fn inverse_project_2d_origin() { - let mut m = Transform3D::identity(); - assert_eq!(m.inverse_project_2d_origin(), Some(Point2D::zero())); - m.m11 = 0.0; - assert_eq!(m.inverse_project_2d_origin(), None); - m.m21 = -2.0; - m.m22 = 0.0; - m.m12 = -0.5; - m.m41 = 1.0; - m.m42 = 0.5; - let origin = m.inverse_project_2d_origin().unwrap(); - assert_eq!(origin, Point2D::new(1.0, 0.5)); - assert_eq!(m.transform_point2d(origin), Some(Point2D::zero())); - } - - #[test] fn polygon_clip_is_left_of_point() { // Define points of a line through (1, -3) and (-2, 6) to test against. // If the triplet consisting of these two points and the test point diff --git a/gfx/wr/webrender_api/src/gradient_builder.rs b/gfx/wr/webrender_api/src/gradient_builder.rs index 6347396f79..545b2d328a 100644 --- a/gfx/wr/webrender_api/src/gradient_builder.rs +++ b/gfx/wr/webrender_api/src/gradient_builder.rs @@ -132,10 +132,6 @@ impl GradientBuilder { let first = *stops.first().unwrap(); let last = *stops.last().unwrap(); - // Express the assertion so that if one of the offsets is NaN, we don't panic - // and instead take the branch that handles degenerate gradients. - assert!(!(first.offset > last.offset)); - let stops_delta = last.offset - first.offset; if stops_delta > 0.000001 { @@ -144,6 +140,14 @@ impl GradientBuilder { } (first.offset, last.offset) + } else if stops_delta.is_nan() { + // We have no good way to render a NaN offset, but make something + // that is at least renderable. 
+ stops.clear(); + stops.push(di::GradientStop { color: last.color, offset: 0.0, }); + stops.push(di::GradientStop { color: last.color, offset: 1.0, }); + + (0.0, 1.0) } else { // We have a degenerate gradient and can't accurately transform the stops // what happens here depends on the repeat behavior, but in any case diff --git a/gfx/wr/wr_glyph_rasterizer/Cargo.toml b/gfx/wr/wr_glyph_rasterizer/Cargo.toml index b06cd0f084..93877277b5 100644 --- a/gfx/wr/wr_glyph_rasterizer/Cargo.toml +++ b/gfx/wr/wr_glyph_rasterizer/Cargo.toml @@ -25,7 +25,7 @@ tracy-rs = "0.1.2" log = "0.4" lazy_static = "1" fxhash = "0.2.1" -glean = { version = "58.1.0", optional = true } +glean = { version = "59.0.0", optional = true } firefox-on-glean = { version = "0.1.0", optional = true } serde = { optional = true, version = "1.0", features = ["serde_derive"] } diff --git a/gfx/wr/wr_glyph_rasterizer/src/platform/macos/font.rs b/gfx/wr/wr_glyph_rasterizer/src/platform/macos/font.rs index aa4e301d32..bdbdee96a1 100644 --- a/gfx/wr/wr_glyph_rasterizer/src/platform/macos/font.rs +++ b/gfx/wr/wr_glyph_rasterizer/src/platform/macos/font.rs @@ -18,7 +18,6 @@ use core_graphics::context::{CGBlendMode, CGTextDrawingMode}; use core_graphics::font::{CGFont, CGGlyph}; use core_graphics::geometry::{CGAffineTransform, CGPoint, CGSize}; use core_graphics::geometry::{CG_AFFINE_TRANSFORM_IDENTITY, CGRect}; -use core_text; use core_text::font::CTFont; use core_text::font_descriptor::{CTFontDescriptor, kCTFontDefaultOrientation}; use core_text::font_descriptor::{kCTFontURLAttribute, kCTFontVariationAttribute}; diff --git a/gfx/wr/wr_glyph_rasterizer/src/platform/windows/font.rs b/gfx/wr/wr_glyph_rasterizer/src/platform/windows/font.rs index 81232d70c4..20fccc626a 100644 --- a/gfx/wr/wr_glyph_rasterizer/src/platform/windows/font.rs +++ b/gfx/wr/wr_glyph_rasterizer/src/platform/windows/font.rs @@ -4,7 +4,6 @@ use api::{FontInstanceFlags, FontKey, FontRenderMode, FontVariation}; use api::{ColorU, GlyphDimensions, NativeFontHandle}; -use dwrote; use crate::gamma_lut::{ColorLut, GammaLut}; use crate::rasterizer::{FontInstance, FontTransform, GlyphKey}; use crate::rasterizer::{GlyphFormat, GlyphRasterError, GlyphRasterResult, RasterizedGlyph}; |
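
The Tiled strategy in the new gfx/wr/webrender/src/quad.rs splits a masked primitive's clipped surface rect into a small grid so that each tile can independently be drawn directly or through an intermediate target. Below is a minimal standalone sketch of the per-axis boundary computation it uses (rounded, evenly spaced cuts, with empty tiles skipped); the function and variable names are illustrative only, not WebRender's types.

// Sketch of the tile-boundary computation used by the Tiled arm of push_quad.
// Hypothetical names; plain i32 coordinates stand in for DeviceIntRect.
fn tile_boundaries(min: i32, max: i32, tiles: u16) -> Vec<i32> {
    let mut coords = vec![min];
    let step = (max - min) as f32 / tiles as f32;
    for i in 1..(tiles as i32) {
        // Round each interior cut to an integer device coordinate.
        coords.push((min as f32 + i as f32 * step).round() as i32);
    }
    coords.push(max);
    coords
}

fn main() {
    // A 1000x600 clipped rect split into a 4x2 grid.
    let xs = tile_boundaries(0, 1000, 4);
    let ys = tile_boundaries(0, 600, 2);
    for y in 0..ys.len() - 1 {
        for x in 0..xs.len() - 1 {
            // Skip degenerate tiles, mirroring the `x1 <= x0` / `y1 <= y0` checks.
            if xs[x + 1] <= xs[x] || ys[y + 1] <= ys[y] {
                continue;
            }
            println!("tile: ({}, {}) .. ({}, {})", xs[x], ys[y], xs[x + 1], ys[y + 1]);
        }
    }
}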
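
quad::write_prim_blocks lays primitive data out as three header blocks (primitive rect, clip rect, color) followed by two blocks per segment (the segment rect, then either zeroes or a render-task reference). The following rough sketch shows that layout using a plain Vec<[f32; 4]> as a stand-in for GpuBufferBuilderF; the Segment type and its Option-based task field are simplifications, and the real code defers task resolution through push_render_task rather than storing a rect directly.

// Sketch of the GPU block layout produced by quad::write_prim_blocks.
struct Segment {
    rect: [f32; 4],
    task: Option<[f32; 4]>, // stand-in for a render-task reference; None maps to zeroes
}

fn write_prim_blocks(
    buffer: &mut Vec<[f32; 4]>, // stand-in for GpuBufferBuilderF
    prim_rect: [f32; 4],
    clip_rect: [f32; 4],
    color: [f32; 4],
    segments: &[Segment],
) -> usize {
    let address = buffer.len(); // 3 + segments.len() * 2 blocks start here
    buffer.push(prim_rect);
    buffer.push(clip_rect);
    buffer.push(color);
    for segment in segments {
        buffer.push(segment.rect);
        buffer.push(segment.task.unwrap_or([0.0; 4]));
    }
    address
}

fn main() {
    let mut gpu_blocks = Vec::new();
    let segments = [Segment { rect: [0.0, 0.0, 64.0, 64.0], task: None }];
    let address = write_prim_blocks(
        &mut gpu_blocks,
        [0.0, 0.0, 128.0, 128.0],
        [0.0, 0.0, 128.0, 128.0],
        [1.0, 1.0, 1.0, 1.0],
        &segments,
    );
    assert_eq!(gpu_blocks.len() - address, 3 + segments.len() * 2);
}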
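
The new slow-frame breakdown in profiler.rs attributes each slow CPU frame to its dominant cost (frame building, rendering with uploads subtracted, or uploads) and reports each bucket as a percentage of all slow frames. Here is a simplified standalone sketch of that attribution; the SlowFrameBreakdown struct and the max(1) guard on the divisor are assumptions made for this example, not code taken from the patch.

// Sketch of the slow-frame attribution and percentage reporting.
#[derive(Default, Debug)]
struct SlowFrameBreakdown {
    total: u64,
    frame_build: u64,
    render: u64,
    upload: u64,
}

impl SlowFrameBreakdown {
    // Attribute one slow frame to its dominant cost; renderer time has
    // upload time subtracted, as in classify_slow_cpu_frame.
    fn classify(&mut self, frame_build_ms: f64, renderer_ms: f64, upload_ms: f64) {
        self.total += 1;
        let render_ms = renderer_ms - upload_ms;
        if frame_build_ms >= render_ms && frame_build_ms >= upload_ms {
            self.frame_build += 1;
        } else if render_ms >= upload_ms {
            self.render += 1;
        } else {
            self.upload += 1;
        }
    }

    // Report each bucket as a percentage of all slow frames recorded so far.
    fn percentages(&self) -> (f64, f64, f64) {
        let div = 100.0 / self.total.max(1) as f64; // guard added for the zero case
        (
            self.frame_build as f64 * div,
            self.render as f64 * div,
            self.upload as f64 * div,
        )
    }
}

fn main() {
    let mut breakdown = SlowFrameBreakdown::default();
    breakdown.classify(9.0, 4.0, 1.0);  // dominated by frame building
    breakdown.classify(2.0, 14.0, 3.0); // dominated by rendering
    println!("{:?}", breakdown.percentages());
}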