/*
 * Copyright (C) 2006 Samuel Weinig (sam.weinig@gmail.com)
 * Copyright (C) 2004, 2005, 2006, 2008, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "BitmapImage.h"

#include "FloatRect.h"
#include "GraphicsContext.h"
#include "ImageBuffer.h"
#include "ImageObserver.h"
#include "IntRect.h"
#include "Logging.h"
#include "Settings.h"
#include "Timer.h"
#include <wtf/Vector.h>
#include <wtf/text/TextStream.h>
#include <wtf/text/WTFString.h>

namespace WebCore {

BitmapImage::BitmapImage(ImageObserver* observer)
    : Image(observer)
    , m_source(ImageSource::create(this))
{
}

BitmapImage::BitmapImage(NativeImagePtr&& image, ImageObserver* observer)
    : Image(observer)
    , m_source(ImageSource::create(WTFMove(image)))
{
}

BitmapImage::~BitmapImage()
{
    invalidatePlatformData();
    clearTimer();
    m_source->clearImage();
    m_source->stopAsyncDecodingQueue();
}

void BitmapImage::updateFromSettings(const Settings& settings)
{
    m_allowSubsampling = settings.imageSubsamplingEnabled();
    m_allowAnimatedImageAsyncDecoding = settings.animatedImageAsyncDecodingEnabled();
    m_showDebugBackground = settings.showDebugBorders();
}

void BitmapImage::destroyDecodedData(bool destroyAll)
{
    LOG(Images, "BitmapImage::%s - %p - url: %s", __FUNCTION__, this, sourceURL().string().utf8().data());

    if (!destroyAll)
        m_source->destroyDecodedDataBeforeFrame(m_currentFrame);
    else if (!canDestroyDecodedData())
        m_source->destroyAllDecodedDataExcludeFrame(m_currentFrame);
    else {
        m_source->destroyAllDecodedData();
        m_currentFrameDecodingStatus = DecodingStatus::Invalid;
    }

    // There's no need to throw away the decoder unless we're explicitly asked
    // to destroy all of the frames.
    if (!destroyAll || m_source->hasAsyncDecodingQueue())
        m_source->clearFrameBufferCache(m_currentFrame);
    else
        m_source->resetData(data());

    invalidatePlatformData();
}

void BitmapImage::destroyDecodedDataIfNecessary(bool destroyAll)
{
    // If we have decoded frames but there is no encoded data, we shouldn't destroy
    // the decoded image since we won't be able to reconstruct it later.
    if (!data() && frameCount())
        return;

    if (m_source->decodedSize() < LargeAnimationCutoff)
        return;

    destroyDecodedData(destroyAll);
}

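// Called as more encoded data arrives. Invalidates the current frame's decoding status so the
// frame is re-decoded on the next draw, and destroys any incomplete decoded data for images
// that are not decoded asynchronously.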
EncodedDataStatus BitmapImage::dataChanged(bool allDataReceived)
{
    if (m_source->decodedSize() && !canUseAsyncDecodingForLargeImages())
        m_source->destroyIncompleteDecodedData();

    m_currentFrameDecodingStatus = DecodingStatus::Invalid;
    return m_source->dataChanged(data(), allDataReceived);
}

void BitmapImage::setCurrentFrameDecodingStatusIfNecessary(DecodingStatus decodingStatus)
{
    // When new data is received, m_currentFrameDecodingStatus is set to DecodingStatus::Invalid
    // to force decoding the frame when it's drawn. m_currentFrameDecodingStatus should not be
    // changed in this case till draw() is called and sets its value to DecodingStatus::Decoding.
    if (m_currentFrameDecodingStatus != DecodingStatus::Decoding)
        return;
    m_currentFrameDecodingStatus = decodingStatus;
}

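// Returns the native image for the frame at the given index, asking the source to decode and
// cache it if needed. If the frame has no full-size native image at the requested subsampling
// level, the platform data is invalidated first so the frame gets re-decoded.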
NativeImagePtr BitmapImage::frameImageAtIndexCacheIfNeeded(size_t index, SubsamplingLevel subsamplingLevel, const GraphicsContext* targetContext)
{
    if (!frameHasFullSizeNativeImageAtIndex(index, subsamplingLevel)) {
        LOG(Images, "BitmapImage::%s - %p - url: %s [subsamplingLevel was %d, resampling]", __FUNCTION__, this, sourceURL().string().utf8().data(), static_cast<int>(frameSubsamplingLevelAtIndex(index)));
        invalidatePlatformData();
    }
#if USE(DIRECT2D)
    m_source->setTargetContext(targetContext);
#else
    UNUSED_PARAM(targetContext);
#endif
    return m_source->frameImageAtIndexCacheIfNeeded(index, subsamplingLevel);
}

NativeImagePtr BitmapImage::nativeImage(const GraphicsContext* targetContext)
{
    return frameImageAtIndexCacheIfNeeded(0, SubsamplingLevel::Default, targetContext);
}

NativeImagePtr BitmapImage::nativeImageForCurrentFrame(const GraphicsContext* targetContext)
{
    return frameImageAtIndexCacheIfNeeded(m_currentFrame, SubsamplingLevel::Default, targetContext);
}

#if USE(CG)
NativeImagePtr BitmapImage::nativeImageOfSize(const IntSize& size, const GraphicsContext* targetContext)
{
    size_t count = frameCount();

    for (size_t i = 0; i < count; ++i) {
        auto image = frameImageAtIndexCacheIfNeeded(i, SubsamplingLevel::Default, targetContext);
        if (image && nativeImageSize(image) == size)
            return image;
    }

    // Fall back to the first frame image if we can't find the right size.
    return frameImageAtIndexCacheIfNeeded(0, SubsamplingLevel::Default, targetContext);
}

Vector<NativeImagePtr> BitmapImage::framesNativeImages()
{
    Vector<NativeImagePtr> images;
    size_t count = frameCount();

    for (size_t i = 0; i < count; ++i) {
        if (auto image = frameImageAtIndexCacheIfNeeded(i))
            images.append(image);
    }

    return images;
}
#endif

#if !ASSERT_DISABLED
bool BitmapImage::notSolidColor()
{
    return size().width() != 1 || size().height() != 1 || frameCount() > 1;
}
#endif

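// Draws the current frame into the given context. For DecodingMode::Asynchronous, a decode
// request is issued if no compatible decoded frame exists yet and the function returns without
// blocking (possibly DidRequestDecoding). For synchronous decoding, the animation is advanced
// if possible; a compatible decoded frame is reused, or the frame is decoded on the spot.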
ImageDrawResult BitmapImage::draw(GraphicsContext& context, const FloatRect& destRect, const FloatRect& srcRect, CompositeOperator op, BlendMode mode, DecodingMode decodingMode, ImageOrientationDescription description)
{
    if (destRect.isEmpty() || srcRect.isEmpty())
        return ImageDrawResult::DidNothing;

    FloatSize scaleFactorForDrawing = context.scaleFactorForDrawing(destRect, srcRect);
    IntSize sizeForDrawing = expandedIntSize(size() * scaleFactorForDrawing);
    ImageDrawResult result = ImageDrawResult::DidDraw;

    m_currentSubsamplingLevel = m_allowSubsampling ? subsamplingLevelForScaleFactor(context, scaleFactorForDrawing) : SubsamplingLevel::Default;
    LOG(Images, "BitmapImage::%s - %p - url: %s [subsamplingLevel = %d scaleFactorForDrawing = (%.4f, %.4f)]", __FUNCTION__, this, sourceURL().string().utf8().data(), static_cast<int>(m_currentSubsamplingLevel), scaleFactorForDrawing.width(), scaleFactorForDrawing.height());

    NativeImagePtr image;
    if (decodingMode == DecodingMode::Asynchronous) {
        ASSERT(!canAnimate());
        ASSERT(!m_currentFrame || m_animationFinished);

        bool frameIsCompatible = frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(m_currentFrame, m_currentSubsamplingLevel, DecodingOptions(sizeForDrawing));
        bool frameIsBeingDecoded = frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(m_currentFrame, DecodingOptions(sizeForDrawing));

        // If the current frame is incomplete, a new request for decoding this frame has to be made even if
        // it is currently being decoded. New data may have been received since the previous request was made.
        if ((!frameIsCompatible && !frameIsBeingDecoded) || m_currentFrameDecodingStatus == DecodingStatus::Invalid) {
            LOG(Images, "BitmapImage::%s - %p - url: %s [requesting large async decoding]", __FUNCTION__, this, sourceURL().string().utf8().data());
            m_source->requestFrameAsyncDecodingAtIndex(m_currentFrame, m_currentSubsamplingLevel, sizeForDrawing);
            m_currentFrameDecodingStatus = DecodingStatus::Decoding;
        }

        if (m_currentFrameDecodingStatus == DecodingStatus::Decoding)
            result = ImageDrawResult::DidRequestDecoding;

        if (!frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(m_currentFrame, m_currentSubsamplingLevel, DecodingOptions(DecodingMode::Asynchronous))) {
            if (m_showDebugBackground)
                fillWithSolidColor(context, destRect, Color(Color::yellow).colorWithAlpha(0.5), op);
            return result;
        }

        image = frameImageAtIndex(m_currentFrame);
        LOG(Images, "BitmapImage::%s - %p - url: %s [a decoded frame will be used for asynchronous drawing]", __FUNCTION__, this, sourceURL().string().utf8().data());
    } else {
        StartAnimationStatus status = internalStartAnimation();
        ASSERT_IMPLIES(status == StartAnimationStatus::DecodingActive, (!m_currentFrame && !m_repetitionsComplete) || frameHasFullSizeNativeImageAtIndex(m_currentFrame, m_currentSubsamplingLevel));

        if (status == StartAnimationStatus::DecodingActive && m_showDebugBackground) {
            fillWithSolidColor(context, destRect, Color(Color::yellow).colorWithAlpha(0.5), op);
            return result;
        }

        // If the decodingMode changes from asynchronous to synchronous and new data is received,
        // the current incomplete decoded frame has to be destroyed.
        if (m_currentFrameDecodingStatus == DecodingStatus::Invalid)
            m_source->destroyIncompleteDecodedData();

        bool frameIsCompatible = frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(m_currentFrame, m_currentSubsamplingLevel, DecodingOptions(sizeForDrawing));
        bool frameIsBeingDecoded = frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(m_currentFrame, DecodingOptions(DecodingMode::Asynchronous));

        if (frameIsCompatible) {
            image = frameImageAtIndex(m_currentFrame);
            LOG(Images, "BitmapImage::%s - %p - url: %s [a decoded frame will be reused for synchronous drawing]", __FUNCTION__, this, sourceURL().string().utf8().data());
        } else if (frameIsBeingDecoded) {
            // FIXME: instead of showing the yellow rectangle and returning we need to wait for this frame to finish decoding.
            if (m_showDebugBackground) {
                fillWithSolidColor(context, destRect, Color(Color::yellow).colorWithAlpha(0.5), op);
                LOG(Images, "BitmapImage::%s - %p - url: %s [waiting for async decoding to finish]", __FUNCTION__, this, sourceURL().string().utf8().data());
            }
            return ImageDrawResult::DidRequestDecoding;
        } else {
            image = frameImageAtIndexCacheIfNeeded(m_currentFrame, m_currentSubsamplingLevel, &context);
            LOG(Images, "BitmapImage::%s - %p - url: %s [an image frame will be decoded synchronously]", __FUNCTION__, this, sourceURL().string().utf8().data());
        }

        if (!image) // If it's too early we won't have an image yet.
            return ImageDrawResult::DidNothing;

        if (m_currentFrameDecodingStatus != DecodingStatus::Complete)
            ++m_decodeCountForTesting;
    }

    ASSERT(image);
    Color color = singlePixelSolidColor();
    if (color.isValid()) {
        fillWithSolidColor(context, destRect, color, op);
        return result;
    }

    ImageOrientation orientation(description.imageOrientation());
    if (description.respectImageOrientation() == RespectImageOrientation)
        orientation = frameOrientationAtIndex(m_currentFrame);

    drawNativeImage(image, context, destRect, srcRect, IntSize(size()), op, mode, orientation);
    m_currentFrameDecodingStatus = frameDecodingStatusAtIndex(m_currentFrame);

    if (imageObserver())
        imageObserver()->didDraw(*this);

    return result;
}

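// When drawing as a luminance mask, the image is first rendered into a compatible ImageBuffer,
// converted to a luminance mask and cached in m_cachedImage for subsequent tiled draws.
// Otherwise the pattern is drawn directly through Image::drawPattern().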
void BitmapImage::drawPattern(GraphicsContext& ctxt, const FloatRect& destRect, const FloatRect& tileRect, const AffineTransform& transform, const FloatPoint& phase, const FloatSize& spacing, CompositeOperator op, BlendMode blendMode)
{
    if (tileRect.isEmpty())
        return;

    if (!ctxt.drawLuminanceMask()) {
        // If new data is received, the current incomplete decoded frame has to be destroyed.
        if (m_currentFrameDecodingStatus == DecodingStatus::Invalid)
            m_source->destroyIncompleteDecodedData();

        Image::drawPattern(ctxt, destRect, tileRect, transform, phase, spacing, op, blendMode);
        m_currentFrameDecodingStatus = frameDecodingStatusAtIndex(m_currentFrame);
        return;
    }

    if (!m_cachedImage) {
        auto buffer = ImageBuffer::createCompatibleBuffer(expandedIntSize(tileRect.size()), ColorSpaceSRGB, ctxt);
        if (!buffer)
            return;

        ImageObserver* observer = imageObserver();

        // Temporarily reset the image observer; we don't want to receive any changedInRect() calls due to this relayout.
        setImageObserver(nullptr);

        draw(buffer->context(), tileRect, tileRect, op, blendMode, DecodingMode::Synchronous, ImageOrientationDescription());

        setImageObserver(observer);
        buffer->convertToLuminanceMask();

        m_cachedImage = ImageBuffer::sinkIntoImage(WTFMove(buffer), PreserveResolution::Yes);
        if (!m_cachedImage)
            return;
    }

    ctxt.setDrawLuminanceMask(false);
    m_cachedImage->drawPattern(ctxt, destRect, tileRect, transform, phase, spacing, op, blendMode);
}

bool BitmapImage::shouldAnimate() const
{
    return repetitionCount() && !m_animationFinished && imageObserver();
}

bool BitmapImage::canAnimate() const
{
    return shouldAnimate() && frameCount() > 1;
}

bool BitmapImage::canUseAsyncDecodingForLargeImages() const
{
    return !canAnimate() && m_source->canUseAsyncDecoding();
}

bool BitmapImage::shouldUseAsyncDecodingForAnimatedImages() const
{
    return canAnimate() && m_allowAnimatedImageAsyncDecoding && (shouldUseAsyncDecodingForTesting() || m_source->canUseAsyncDecoding());
}

void BitmapImage::clearTimer()
{
    m_frameTimer = nullptr;
}

void BitmapImage::startTimer(Seconds delay)
{
    ASSERT(!m_frameTimer);
    m_frameTimer = std::make_unique<Timer>(*this, &BitmapImage::advanceAnimation);
    m_frameTimer->startOneShot(delay);
}

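// Maps a drawing scale factor to a subsampling level: ceil(log2(1 / scale)), clamped to the
// source's maximum level. For example, a scale of 0.5 yields level 1 and 0.25 yields level 2,
// while a scale of 1 or larger yields level 0.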
SubsamplingLevel BitmapImage::subsamplingLevelForScaleFactor(GraphicsContext& context, const FloatSize& scaleFactor)
{
#if USE(CG)
    // Never use subsampled images for drawing into PDF contexts.
    if (CGContextGetType(context.platformContext()) == kCGContextTypePDF)
        return SubsamplingLevel::Default;

    float scale = std::min(float(1), std::max(scaleFactor.width(), scaleFactor.height()));
    if (!(scale > 0 && scale <= 1))
        return SubsamplingLevel::Default;

    int result = std::ceil(std::log2(1 / scale));
    return static_cast<SubsamplingLevel>(std::min(result, static_cast<int>(m_source->maximumSubsamplingLevel())));
#else
    UNUSED_PARAM(context);
    UNUSED_PARAM(scaleFactor);
    return SubsamplingLevel::Default;
#endif
}

bool BitmapImage::canDestroyDecodedData()
{
    // Animated images should preserve the current frame till the next one finishes decoding.
    if (m_source->hasAsyncDecodingQueue())
        return false;

    // Small images should be decoded synchronously. Deleting their decoded frames is fine.
    if (!canUseAsyncDecodingForLargeImages())
        return true;

    return !imageObserver() || imageObserver()->canDestroyDecodedData(*this);
}

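// Tries to schedule the next animation frame. Returns CannotStart if the image cannot animate or
// the animation has finished, TimerActive if a frame timer is already pending, DecodingActive if
// the next frame is still being decoded, IncompleteData if more encoded data is needed, and
// Started once the frame timer has been scheduled. When async decoding of animated images is
// enabled, decoding of the next frame is also requested here.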
BitmapImage::StartAnimationStatus BitmapImage::internalStartAnimation()
{
    LOG_WITH_STREAM(Images, stream << "BitmapImage " << this << " internalStartAnimation");

    if (!canAnimate())
        return StartAnimationStatus::CannotStart;

    if (m_frameTimer)
        return StartAnimationStatus::TimerActive;

    // Don't start a new animation until we draw the frame that is currently being decoded.
    size_t nextFrame = (m_currentFrame + 1) % frameCount();
    if (frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(nextFrame, DecodingOptions(DecodingMode::Asynchronous))) {
        LOG(Images, "BitmapImage::%s - %p - url: %s [nextFrame = %ld is being decoded]", __FUNCTION__, this, sourceURL().string().utf8().data(), nextFrame);
        return StartAnimationStatus::DecodingActive;
    }

    if (m_currentFrame >= frameCount() - 1) {
        // Don't advance past the last frame if we haven't decoded the whole image
        // yet and our repetition count is potentially unset. The repetition count
        // in a GIF can potentially come after all the rest of the image data, so
        // wait on it.
        if (!m_source->isAllDataReceived() && repetitionCount() == RepetitionCountOnce)
            return StartAnimationStatus::IncompleteData;

        ++m_repetitionsComplete;

        // Check for the end of animation.
        if (repetitionCount() != RepetitionCountInfinite && m_repetitionsComplete >= repetitionCount()) {
            m_animationFinished = true;
            destroyDecodedDataIfNecessary(false);
            return StartAnimationStatus::CannotStart;
        }

        destroyDecodedDataIfNecessary(true);
    }

    // Don't advance the animation to an incomplete frame.
    if (!m_source->isAllDataReceived() && !frameIsCompleteAtIndex(nextFrame))
        return StartAnimationStatus::IncompleteData;

    MonotonicTime time = MonotonicTime::now();

    // Handle initial state.
    if (!m_desiredFrameStartTime)
        m_desiredFrameStartTime = time;

    // Setting 'm_desiredFrameStartTime' to 'time' means we are late; otherwise we are early.
    m_desiredFrameStartTime = std::max(time, m_desiredFrameStartTime + Seconds { frameDurationAtIndex(m_currentFrame) });

    // Request async decoding for nextFrame only if this is required. If nextFrame is not in the frameCache,
    // it will be decoded on a separate work queue. When decoding nextFrame finishes, we will be notified
    // through the callback imageFrameAvailableAtIndex(). Otherwise, advanceAnimation() will be called
    // when the timer fires and m_currentFrame will be advanced to nextFrame since it is not being decoded.
    if (shouldUseAsyncDecodingForAnimatedImages()) {
        if (frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(nextFrame, m_currentSubsamplingLevel, DecodingOptions(Optional<IntSize>())))
            LOG(Images, "BitmapImage::%s - %p - url: %s [cachedFrameCount = %ld nextFrame = %ld]", __FUNCTION__, this, sourceURL().string().utf8().data(), ++m_cachedFrameCount, nextFrame);
        else {
            m_source->requestFrameAsyncDecodingAtIndex(nextFrame, m_currentSubsamplingLevel);
            m_currentFrameDecodingStatus = DecodingStatus::Decoding;
            LOG(Images, "BitmapImage::%s - %p - url: %s [requesting async decoding for nextFrame = %ld]", __FUNCTION__, this, sourceURL().string().utf8().data(), nextFrame);
        }

        if (m_clearDecoderAfterAsyncFrameRequestForTesting)
            m_source->resetData(data());
    }

    ASSERT(!m_frameTimer);
    startTimer(m_desiredFrameStartTime - time);
    return StartAnimationStatus::Started;
}

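// Timer callback. Advances the animation to the next frame unless that frame is still being
// decoded asynchronously; in that case imageFrameAvailableAtIndex() will advance the animation
// once decoding finishes.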
void BitmapImage::advanceAnimation()
{
    clearTimer();

    // Don't advance to nextFrame unless its decoding has finished or was not required.
    size_t nextFrame = (m_currentFrame + 1) % frameCount();
    if (!frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(nextFrame, DecodingOptions(DecodingMode::Asynchronous)))
        internalAdvanceAnimation();
    else {
        // Force repaint if showDebugBackground() is on.
        if (m_showDebugBackground)
            imageObserver()->changedInRect(*this);
        LOG(Images, "BitmapImage::%s - %p - url: %s [lateFrameCount = %ld nextFrame = %ld]", __FUNCTION__, this, sourceURL().string().utf8().data(), ++m_lateFrameCount, nextFrame);
    }
}

void BitmapImage::internalAdvanceAnimation()
{
    m_currentFrame = (m_currentFrame + 1) % frameCount();
    ASSERT(!frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(m_currentFrame, DecodingOptions(DecodingMode::Asynchronous)));

    destroyDecodedDataIfNecessary(false);

    DecodingStatus decodingStatus = frameDecodingStatusAtIndex(m_currentFrame);
    setCurrentFrameDecodingStatusIfNecessary(decodingStatus);

    callDecodingCallbacks();

    if (imageObserver())
        imageObserver()->imageFrameAvailable(*this, ImageAnimatingState::Yes, nullptr, decodingStatus);

    LOG(Images, "BitmapImage::%s - %p - url: %s [m_currentFrame = %ld]", __FUNCTION__, this, sourceURL().string().utf8().data(), m_currentFrame);
}

bool BitmapImage::isAnimating() const
{
    return !!m_frameTimer;
}

void BitmapImage::stopAnimation()
{
    // This timer is used to animate all occurrences of this image. Don't invalidate
    // the timer unless all renderers have stopped drawing.
    clearTimer();
    if (canAnimate())
        m_source->stopAsyncDecodingQueue();
}

void BitmapImage::resetAnimation()
{
    stopAnimation();
    m_currentFrame = 0;
    m_repetitionsComplete = RepetitionCountNone;
    m_desiredFrameStartTime = { };
    m_animationFinished = false;

    // For extremely large animations, when the animation is reset, we just throw everything away.
    destroyDecodedDataIfNecessary(true);
}

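// Queues a callback to run once a frame of this image has been decoded at its native size. If no
// suitable decoded frame exists and none is in flight, an async decode is requested. The queued
// callbacks are run from callDecodingCallbacks().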
void BitmapImage::decode(WTF::Function<void()>&& callback)
{
    if (!m_decodingCallbacks)
        m_decodingCallbacks = std::make_unique<Vector<Function<void()>, 1>>();

    m_decodingCallbacks->append(WTFMove(callback));

    if (canAnimate()) {
        if (m_desiredFrameStartTime) {
            internalStartAnimation();
            return;
        }

        // The animated image has not been displayed. In this case, either the first frame has not been decoded yet or the animation has not started yet.
        bool frameIsCompatible = frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(m_currentFrame, m_currentSubsamplingLevel, Optional<IntSize>());
        bool frameIsBeingDecoded = frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(m_currentFrame, Optional<IntSize>());

        if (frameIsCompatible)
            internalStartAnimation();
        else if (!frameIsBeingDecoded) {
            m_source->requestFrameAsyncDecodingAtIndex(m_currentFrame, m_currentSubsamplingLevel, Optional<IntSize>());
            m_currentFrameDecodingStatus = DecodingStatus::Decoding;
        }
        return;
    }

    bool frameIsCompatible = frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(m_currentFrame, m_currentSubsamplingLevel, Optional<IntSize>());
    bool frameIsBeingDecoded = frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(m_currentFrame, Optional<IntSize>());

    if (frameIsCompatible)
        callDecodingCallbacks();
    else if (!frameIsBeingDecoded) {
        m_source->requestFrameAsyncDecodingAtIndex(m_currentFrame, m_currentSubsamplingLevel, Optional<IntSize>());
        m_currentFrameDecodingStatus = DecodingStatus::Decoding;
    }
}

void BitmapImage::callDecodingCallbacks()
{
    if (!m_decodingCallbacks)
        return;
    for (auto& decodingCallback : *m_decodingCallbacks)
        decodingCallback();
    m_decodingCallbacks = nullptr;
}

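// Called when an asynchronously requested frame has become available. For animations this may
// advance to the next frame if the frame timer has already fired; otherwise the observer is
// notified and any pending decoding callbacks are run.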
void BitmapImage::imageFrameAvailableAtIndex(size_t index)
{
    LOG(Images, "BitmapImage::%s - %p - url: %s [requested frame %ld is now available]", __FUNCTION__, this, sourceURL().string().utf8().data(), index);

    if (canAnimate()) {
        if (index == (m_currentFrame + 1) % frameCount()) {
            // Advance to nextFrame only if the timer already fired before this frame finished decoding;
            // otherwise the pending timer will advance the animation.
            if (!m_frameTimer)
                internalAdvanceAnimation();
            else
                LOG(Images, "BitmapImage::%s - %p - url: %s [earlyFrameCount = %ld nextFrame = %ld]", __FUNCTION__, this, sourceURL().string().utf8().data(), ++m_earlyFrameCount, index);
            return;
        }

        // Because of image partial loading, an image may start decoding as a large static image. But
        // when more data is received, frameCount() changes to be > 1 so the image starts animating.
        // The animation may even start before finishing the decoding of the first frame.
        ASSERT(!m_repetitionsComplete);
        LOG(Images, "BitmapImage::%s - %p - url: %s [More data makes frameCount() > 1]", __FUNCTION__, this, sourceURL().string().utf8().data());
    }

    ASSERT(index == m_currentFrame && !m_currentFrame);
    if (m_source->isAsyncDecodingQueueIdle())
        m_source->stopAsyncDecodingQueue();

    DecodingStatus decodingStatus = frameDecodingStatusAtIndex(m_currentFrame);
    setCurrentFrameDecodingStatusIfNecessary(decodingStatus);

    if (m_currentFrameDecodingStatus == DecodingStatus::Complete)
        ++m_decodeCountForTesting;

    // Call m_decodingCallbacks only if the image frame was decoded with the native size.
    if (frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(m_currentFrame, m_currentSubsamplingLevel, Optional<IntSize>()))
        callDecodingCallbacks();

    if (imageObserver())
        imageObserver()->imageFrameAvailable(*this, ImageAnimatingState::No, nullptr, decodingStatus);
}

unsigned BitmapImage::decodeCountForTesting() const
{
    return m_decodeCountForTesting;
}

void BitmapImage::dump(TextStream& ts) const
{
    Image::dump(ts);

    if (isAnimated())
        ts.dumpProperty("current-frame", m_currentFrame);

    m_source->dump(ts);
}

} // namespace WebCore