1 | /* |
2 | * Copyright (C) 2016-2017 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "ImageSource.h" |
28 | |
29 | #include "BitmapImage.h" |
30 | #include "ImageDecoder.h" |
31 | #include "ImageObserver.h" |
32 | #include "Logging.h" |
33 | #include <wtf/CheckedArithmetic.h> |
34 | #include <wtf/MainThread.h> |
35 | #include <wtf/RunLoop.h> |
36 | #include <wtf/SystemTracing.h> |
37 | #include <wtf/URL.h> |
38 | |
39 | #if USE(DIRECT2D) |
40 | #include "GraphicsContext.h" |
41 | #endif |
42 | |
43 | namespace WebCore { |
44 | |
// Constructs an ImageSource for the given BitmapImage. No decoder is created
// here; one is built lazily by ensureDecoderAvailable() once data arrives.
ImageSource::ImageSource(BitmapImage* image, AlphaOption alphaOption, GammaAndColorProfileOption gammaAndColorProfileOption)
    : m_image(image)
    , m_alphaOption(alphaOption)
    , m_gammaAndColorProfileOption(gammaAndColorProfileOption)
{
}
51 | |
// Constructs an ImageSource wrapping an already-decoded memory image: exactly
// one frame, marked complete, with no decoder involved.
ImageSource::ImageSource(NativeImagePtr&& nativeImage)
{
    m_frameCount = 1;
    m_encodedDataStatus = EncodedDataStatus::Complete;
    growFrames();

    setNativeImage(WTFMove(nativeImage));

    // Record the frame's footprint up front since no decode callback will do it.
    m_decodedSize = m_frames[0].frameBytes();

    // The assumption is the memory image will be displayed with the default
    // orientation. So set m_sizeRespectingOrientation to be the same as m_size.
    m_size = m_frames[0].size();
    m_sizeRespectingOrientation = m_size;
}
67 | |
// The owner must have called stopAsyncDecodingQueue() before destruction;
// the assert catches a decoding queue that would outlive this object.
ImageSource::~ImageSource()
{
    ASSERT(!hasAsyncDecodingQueue());
}
72 | |
// Lazily creates the ImageDecoder for the given encoded data. Returns true if
// a decoder exists afterwards (or if there is no data yet, in which case there
// is nothing to decode and callers treat that as success). Returns false only
// when decoder creation failed for real data.
bool ImageSource::ensureDecoderAvailable(SharedBuffer* data)
{
    if (!data || isDecoderAvailable())
        return true;

    m_decoder = ImageDecoder::create(*data, mimeType(), m_alphaOption, m_gammaAndColorProfileOption);
    if (!isDecoderAvailable())
        return false;

    // The weak pointer guards against the callback firing after this
    // ImageSource has been destroyed.
    m_decoder->setEncodedDataStatusChangeCallback([weakThis = makeWeakPtr(this)] (auto status) {
        if (weakThis)
            weakThis->encodedDataStatusChanged(status);
    });

    if (auto expectedContentSize = expectedContentLength())
        m_decoder->setExpectedContentSize(expectedContentSize);

    // Changing the decoder has to stop the decoding thread. The current frame will
    // continue decoding safely because the decoding thread has its own
    // reference of the old decoder.
    stopAsyncDecodingQueue();
    return true;
}
96 | |
97 | void ImageSource::setData(SharedBuffer* data, bool allDataReceived) |
98 | { |
99 | if (!data || !ensureDecoderAvailable(data)) |
100 | return; |
101 | |
102 | m_decoder->setData(*data, allDataReceived); |
103 | } |
104 | |
// Drops the current decoder and rebuilds it from scratch for the given data,
// preserving the all-data-received state.
void ImageSource::resetData(SharedBuffer* data)
{
    m_decoder = nullptr;
    setData(data, isAllDataReceived());
}
110 | |
// Called when more encoded bytes arrive. Pushes the data to the decoder,
// invalidates cached metadata, and grows the frame cache once the image size
// is known. Returns the decoder's current status.
EncodedDataStatus ImageSource::dataChanged(SharedBuffer* data, bool allDataReceived)
{
    setData(data, allDataReceived);
    clearMetadata();
    EncodedDataStatus status = encodedDataStatus();
    if (status >= EncodedDataStatus::SizeAvailable)
        growFrames();
    return status;
}
120 | |
// With a decoder, delegate to it. Without one (memory image case), a non-zero
// frameCount() implies the image is fully materialized; the size_t is
// intentionally converted to bool.
bool ImageSource::isAllDataReceived()
{
    return isDecoderAvailable() ? m_decoder->isAllDataReceived() : frameCount();
}
125 | |
// Releases the decoded images of the first `frameCount` frames (keeping frame
// metadata), except the frame at `excludeFrame`, and reports the freed bytes.
void ImageSource::destroyDecodedData(size_t frameCount, size_t excludeFrame)
{
    unsigned decodedSize = 0;

    ASSERT(frameCount <= m_frames.size());

    for (size_t index = 0; index < frameCount; ++index) {
        if (index == excludeFrame)
            continue;
        decodedSize += m_frames[index].clearImage();
    }

    // Reset (rather than decrease) also folds in m_decodedPropertiesSize.
    decodedSizeReset(decodedSize);
}
140 | |
141 | void ImageSource::destroyIncompleteDecodedData() |
142 | { |
143 | unsigned decodedSize = 0; |
144 | |
145 | for (auto& frame : m_frames) { |
146 | if (!frame.hasMetadata() || frame.isComplete()) |
147 | continue; |
148 | |
149 | decodedSize += frame.clear(); |
150 | } |
151 | |
152 | decodedSizeDecreased(decodedSize); |
153 | } |
154 | |
155 | void ImageSource::clearFrameBufferCache(size_t beforeFrame) |
156 | { |
157 | if (!isDecoderAvailable()) |
158 | return; |
159 | m_decoder->clearFrameBufferCache(beforeFrame); |
160 | } |
161 | |
// Decoder callback: records the new status, grows the frame cache once the
// size is known, and notifies the image's observer of the change.
void ImageSource::encodedDataStatusChanged(EncodedDataStatus status)
{
    if (status == m_encodedDataStatus)
        return;

    m_encodedDataStatus = status;

    if (status >= EncodedDataStatus::SizeAvailable)
        growFrames();

    if (m_image && m_image->imageObserver())
        m_image->imageObserver()->encodedDataStatusChanged(*m_image, status);
}
175 | |
176 | void ImageSource::decodedSizeChanged(long long decodedSize) |
177 | { |
178 | if (!decodedSize || !m_image || !m_image->imageObserver()) |
179 | return; |
180 | |
181 | m_image->imageObserver()->decodedSizeChanged(*m_image, decodedSize); |
182 | } |
183 | |
// Accounts for newly cached decoded bytes. The reported delta is reduced by
// m_decodedPropertiesSize because the full frame subsumes the bytes that were
// decoded earlier just to determine image properties.
void ImageSource::decodedSizeIncreased(unsigned decodedSize)
{
    if (!decodedSize)
        return;

    m_decodedSize += decodedSize;

    // The fully-decoded frame will subsume the partially decoded data used
    // to determine image properties.
    long long changeSize = static_cast<long long>(decodedSize) - m_decodedPropertiesSize;
    m_decodedPropertiesSize = 0;
    decodedSizeChanged(changeSize);
}
197 | |
// Accounts for decoded bytes that were released; notifies the observer with a
// negative delta.
void ImageSource::decodedSizeDecreased(unsigned decodedSize)
{
    if (!decodedSize)
        return;

    ASSERT(m_decodedSize >= decodedSize);
    m_decodedSize -= decodedSize;
    decodedSizeChanged(-static_cast<long long>(decodedSize));
}
207 | |
// Like decodedSizeDecreased(), but also releases the properties-only decoded
// bytes, since a reset destroys those too.
void ImageSource::decodedSizeReset(unsigned decodedSize)
{
    ASSERT(m_decodedSize >= decodedSize);
    m_decodedSize -= decodedSize;

    // Clearing the ImageSource destroys the extra decoded data used for
    // determining image properties.
    decodedSize += m_decodedPropertiesSize;
    m_decodedPropertiesSize = 0;
    decodedSizeChanged(-static_cast<long long>(decodedSize));
}
219 | |
// Records bytes decoded purely to determine image properties. Ignored once a
// real frame has been decoded (m_decodedSize non-zero), because that frame's
// accounting subsumes the properties bytes.
void ImageSource::didDecodeProperties(unsigned decodedPropertiesSize)
{
    if (m_decodedSize)
        return;

    long long decodedSize = static_cast<long long>(decodedPropertiesSize) - m_decodedPropertiesSize;
    m_decodedPropertiesSize = decodedPropertiesSize;
    decodedSizeChanged(decodedSize);
}
229 | |
// Grows the frame-metadata cache to match the decoder's frame count. Never
// shrinks it, so existing cached frames are preserved.
void ImageSource::growFrames()
{
    ASSERT(isSizeAvailable());
    auto newSize = frameCount();
    if (newSize > m_frames.size())
        m_frames.grow(newSize);
}
237 | |
// Installs a pre-decoded image into the single frame slot (memory image path
// only — there must be no decoder). Frame metadata is derived directly from
// the native image.
void ImageSource::setNativeImage(NativeImagePtr&& nativeImage)
{
    ASSERT(m_frames.size() == 1);
    ImageFrame& frame = m_frames[0];

    ASSERT(!isDecoderAvailable());

    frame.m_nativeImage = WTFMove(nativeImage);

    frame.m_decodingStatus = DecodingStatus::Complete;
    frame.m_size = nativeImageSize(frame.m_nativeImage);
    frame.m_hasAlpha = nativeImageHasAlpha(frame.m_nativeImage);
}
251 | |
// Caches per-frame metadata obtained from the decoder. The decoding status is
// always refreshed (a Partial frame may have become Complete); the remaining
// metadata is filled in only once, the first time it becomes available.
void ImageSource::cacheMetadataAtIndex(size_t index, SubsamplingLevel subsamplingLevel, DecodingStatus decodingStatus)
{
    ASSERT(index < m_frames.size());
    ImageFrame& frame = m_frames[index];

    ASSERT(isDecoderAvailable());
    // Invalid means "not told by the caller": ask the decoder instead.
    if (decodingStatus == DecodingStatus::Invalid)
        frame.m_decodingStatus = m_decoder->frameIsCompleteAtIndex(index) ? DecodingStatus::Complete : DecodingStatus::Partial;
    else
        frame.m_decodingStatus = decodingStatus;

    if (frame.hasMetadata())
        return;

    frame.m_subsamplingLevel = subsamplingLevel;

    // A frame decoded with a sizeForDrawing carries the native image's actual
    // size, not the decoder's nominal frame size.
    if (frame.m_decodingOptions.hasSizeForDrawing()) {
        ASSERT(frame.hasNativeImage());
        frame.m_size = nativeImageSize(frame.nativeImage());
    } else
        frame.m_size = m_decoder->frameSizeAtIndex(index, subsamplingLevel);

    frame.m_orientation = m_decoder->frameOrientationAtIndex(index);
    frame.m_hasAlpha = m_decoder->frameHasAlphaAtIndex(index);

    // Frame durations only matter for animated images (non-zero repetition count).
    if (repetitionCount())
        frame.m_duration = m_decoder->frameDurationAtIndex(index);
}
280 | |
// Replaces the cached decoded image for a frame, keeping MemoryCache
// accounting balanced: the old frame's bytes are subtracted before the new
// frame's bytes are added.
void ImageSource::cacheNativeImageAtIndex(NativeImagePtr&& nativeImage, size_t index, SubsamplingLevel subsamplingLevel, const DecodingOptions& decodingOptions, DecodingStatus decodingStatus)
{
    ASSERT(index < m_frames.size());
    ImageFrame& frame = m_frames[index];

    // Clear the current image frame and update the observer with this clearance.
    decodedSizeDecreased(frame.clear());

    // Do not cache the NativeImage if adding its frameBytes to the MemoryCache will cause numerical overflow.
    size_t frameBytes = size().unclampedArea() * sizeof(uint32_t);
    if (!WTF::isInBounds<unsigned>(frameBytes + decodedSize()))
        return;

    // Move the new image to the cache.
    frame.m_nativeImage = WTFMove(nativeImage);
    frame.m_decodingOptions = decodingOptions;
    cacheMetadataAtIndex(index, subsamplingLevel, decodingStatus);

    // Update the observer with the new image frame bytes.
    decodedSizeIncreased(frame.frameBytes());
}
302 | |
303 | void ImageSource::cacheNativeImageAtIndexAsync(NativeImagePtr&& nativeImage, size_t index, SubsamplingLevel subsamplingLevel, const DecodingOptions& decodingOptions, DecodingStatus decodingStatus) |
304 | { |
305 | if (!isDecoderAvailable()) |
306 | return; |
307 | |
308 | ASSERT(index < m_frames.size()); |
309 | |
310 | // Clean the old native image and set a new one |
311 | cacheNativeImageAtIndex(WTFMove(nativeImage), index, subsamplingLevel, decodingOptions, decodingStatus); |
312 | LOG(Images, "ImageSource::%s - %p - url: %s [frame %ld has been cached]" , __FUNCTION__, this, sourceURL().string().utf8().data(), index); |
313 | |
314 | // Notify the image with the readiness of the new frame NativeImage. |
315 | if (m_image) |
316 | m_image->imageFrameAvailableAtIndex(index); |
317 | } |
318 | |
319 | WorkQueue& ImageSource::decodingQueue() |
320 | { |
321 | if (!m_decodingQueue) |
322 | m_decodingQueue = WorkQueue::create("org.webkit.ImageDecoder" , WorkQueue::Type::Serial, WorkQueue::QOS::Default); |
323 | |
324 | return *m_decodingQueue; |
325 | } |
326 | |
327 | ImageSource::FrameRequestQueue& ImageSource::frameRequestQueue() |
328 | { |
329 | if (!m_frameRequestQueue) |
330 | m_frameRequestQueue = FrameRequestQueue::create(); |
331 | |
332 | return *m_frameRequestQueue; |
333 | } |
334 | |
// Async decoding is worthwhile only for large images. Animated images
// (frameCount > 1) get a lower threshold than single-frame images.
bool ImageSource::canUseAsyncDecoding()
{
    if (!isDecoderAvailable())
        return false;
    // FIXME: figure out the best heuristic for enabling async image decoding.
    return size().area() * sizeof(uint32_t) >= (frameCount() > 1 ? 100 * KB : 500 * KB);
}
342 | |
343 | void ImageSource::startAsyncDecodingQueue() |
344 | { |
345 | if (hasAsyncDecodingQueue() || !isDecoderAvailable()) |
346 | return; |
347 | |
348 | // We need to protect this, m_decodingQueue and m_decoder from being deleted while we are in the decoding loop. |
349 | decodingQueue().dispatch([protectedThis = makeRef(*this), protectedDecodingQueue = makeRef(decodingQueue()), protectedFrameRequestQueue = makeRef(frameRequestQueue()), protectedDecoder = makeRef(*m_decoder), sourceURL = sourceURL().string().isolatedCopy()] { |
350 | ImageFrameRequest frameRequest; |
351 | Seconds minDecodingDuration = protectedThis->frameDecodingDurationForTesting(); |
352 | |
353 | while (protectedFrameRequestQueue->dequeue(frameRequest)) { |
354 | TraceScope tracingScope(AsyncImageDecodeStart, AsyncImageDecodeEnd); |
355 | |
356 | MonotonicTime startingTime; |
357 | if (minDecodingDuration > 0_s) |
358 | startingTime = MonotonicTime::now(); |
359 | |
360 | // Get the frame NativeImage on the decoding thread. |
361 | NativeImagePtr nativeImage = protectedDecoder->createFrameImageAtIndex(frameRequest.index, frameRequest.subsamplingLevel, frameRequest.decodingOptions); |
362 | if (nativeImage) |
363 | LOG(Images, "ImageSource::%s - %p - url: %s [frame %ld has been decoded]" , __FUNCTION__, protectedThis.ptr(), sourceURL.utf8().data(), frameRequest.index); |
364 | else { |
365 | LOG(Images, "ImageSource::%s - %p - url: %s [decoding for frame %ld has failed]" , __FUNCTION__, protectedThis.ptr(), sourceURL.utf8().data(), frameRequest.index); |
366 | continue; |
367 | } |
368 | |
369 | // Pretend as if the decoding takes minDecodingDuration. |
370 | if (minDecodingDuration > 0_s) |
371 | sleep(minDecodingDuration - (MonotonicTime::now() - startingTime)); |
372 | |
373 | // Update the cached frames on the main thread to avoid updating the MemoryCache from a different thread. |
374 | callOnMainThread([protectedThis = protectedThis.copyRef(), protectedQueue = protectedDecodingQueue.copyRef(), protectedDecoder = protectedDecoder.copyRef(), sourceURL = sourceURL.isolatedCopy(), nativeImage = WTFMove(nativeImage), frameRequest] () mutable { |
375 | // The queue may have been closed if after we got the frame NativeImage, stopAsyncDecodingQueue() was called. |
376 | if (protectedQueue.ptr() == protectedThis->m_decodingQueue && protectedDecoder.ptr() == protectedThis->m_decoder) { |
377 | ASSERT(protectedThis->m_frameCommitQueue.first() == frameRequest); |
378 | protectedThis->m_frameCommitQueue.removeFirst(); |
379 | protectedThis->cacheNativeImageAtIndexAsync(WTFMove(nativeImage), frameRequest.index, frameRequest.subsamplingLevel, frameRequest.decodingOptions, frameRequest.decodingStatus); |
380 | } else |
381 | LOG(Images, "ImageSource::%s - %p - url: %s [frame %ld will not cached]" , __FUNCTION__, protectedThis.ptr(), sourceURL.utf8().data(), frameRequest.index); |
382 | }); |
383 | } |
384 | }); |
385 | } |
386 | |
387 | void ImageSource::requestFrameAsyncDecodingAtIndex(size_t index, SubsamplingLevel subsamplingLevel, const Optional<IntSize>& sizeForDrawing) |
388 | { |
389 | ASSERT(isDecoderAvailable()); |
390 | if (!hasAsyncDecodingQueue()) |
391 | startAsyncDecodingQueue(); |
392 | |
393 | ASSERT(index < m_frames.size()); |
394 | DecodingStatus decodingStatus = m_decoder->frameIsCompleteAtIndex(index) ? DecodingStatus::Complete : DecodingStatus::Partial; |
395 | |
396 | LOG(Images, "ImageSource::%s - %p - url: %s [enqueuing frame %ld for decoding]" , __FUNCTION__, this, sourceURL().string().utf8().data(), index); |
397 | m_frameRequestQueue->enqueue({ index, subsamplingLevel, sizeForDrawing, decodingStatus }); |
398 | m_frameCommitQueue.append({ index, subsamplingLevel, sizeForDrawing, decodingStatus }); |
399 | } |
400 | |
// The queue is idle when no decode requests are awaiting commit on the main thread.
bool ImageSource::isAsyncDecodingQueueIdle() const
{
    return m_frameCommitQueue.isEmpty();
}
405 | |
406 | void ImageSource::stopAsyncDecodingQueue() |
407 | { |
408 | if (!hasAsyncDecodingQueue()) |
409 | return; |
410 | |
411 | std::for_each(m_frameCommitQueue.begin(), m_frameCommitQueue.end(), [this](const ImageFrameRequest& frameRequest) { |
412 | ImageFrame& frame = m_frames[frameRequest.index]; |
413 | if (!frame.isInvalid()) { |
414 | LOG(Images, "ImageSource::%s - %p - url: %s [decoding has been cancelled for frame %ld]" , __FUNCTION__, this, sourceURL().string().utf8().data(), frameRequest.index); |
415 | frame.clear(); |
416 | } |
417 | }); |
418 | |
419 | // Close m_frameRequestQueue then set it to nullptr. A new decoding thread might start and a |
420 | // new m_frameRequestQueue will be created. So the terminating thread will not have access to it. |
421 | m_frameRequestQueue->close(); |
422 | m_frameRequestQueue = nullptr; |
423 | m_frameCommitQueue.clear(); |
424 | m_decodingQueue = nullptr; |
425 | LOG(Images, "ImageSource::%s - %p - url: %s [decoding has been stopped]" , __FUNCTION__, this, sourceURL().string().utf8().data()); |
426 | } |
427 | |
// Returns the frame at `index`, synchronously caching its metadata (and,
// for MetadataAndImage, its decoded image) first if needed. Returns the frame
// as-is when no decoder exists or when an async decode for this frame is
// already in flight (to avoid decoding the same frame twice).
const ImageFrame& ImageSource::frameAtIndexCacheIfNeeded(size_t index, ImageFrame::Caching caching, const Optional<SubsamplingLevel>& subsamplingLevel)
{
    ASSERT(index < m_frames.size());
    ImageFrame& frame = m_frames[index];
    if (!isDecoderAvailable() || frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(index, DecodingOptions(DecodingMode::Asynchronous)))
        return frame;

    SubsamplingLevel subsamplingLevelValue = subsamplingLevel ? subsamplingLevel.value() : frame.subsamplingLevel();

    switch (caching) {
    case ImageFrame::Caching::Metadata:
        // Retrieve the metadata from ImageDecoder if the ImageFrame isn't complete.
        if (frame.isComplete())
            break;
        cacheMetadataAtIndex(index, subsamplingLevelValue);
        break;

    case ImageFrame::Caching::MetadataAndImage:
        // Cache the image and retrieve the metadata from ImageDecoder only if there was not valid image stored.
        if (frame.hasFullSizeNativeImage(subsamplingLevel))
            break;
        // We have to perform synchronous image decoding in this code.
        NativeImagePtr nativeImage = m_decoder->createFrameImageAtIndex(index, subsamplingLevelValue);
        // Clean the old native image and set a new one.
        cacheNativeImageAtIndex(WTFMove(nativeImage), index, subsamplingLevelValue, DecodingOptions(DecodingMode::Synchronous));
        break;
    }

    return frame;
}
458 | |
// Invalidates cached image-level metadata so it is re-fetched from the
// decoder on next access (used when new encoded data arrives).
void ImageSource::clearMetadata()
{
    m_frameCount = WTF::nullopt;
    m_repetitionCount = WTF::nullopt;
    m_singlePixelSolidColor = WTF::nullopt;
    m_encodedDataStatus = WTF::nullopt;
    m_uti = WTF::nullopt;
}
467 | |
468 | URL ImageSource::sourceURL() const |
469 | { |
470 | return m_image ? m_image->sourceURL() : URL(); |
471 | } |
472 | |
473 | String ImageSource::mimeType() const |
474 | { |
475 | return m_image ? m_image->mimeType() : emptyString(); |
476 | } |
477 | |
478 | long long ImageSource::expectedContentLength() const |
479 | { |
480 | return m_image ? m_image->expectedContentLength() : 0; |
481 | } |
482 | |
// Generic cached-metadata getter: returns the cached value when present;
// otherwise queries the decoder via `functor` once the size is available,
// caching the result when a cache slot is supplied. Also accounts for the
// bytes the decoder consumed to determine properties.
template<typename T, T (ImageDecoder::*functor)() const>
T ImageSource::metadata(const T& defaultValue, Optional<T>* cachedValue)
{
    if (cachedValue && *cachedValue)
        return cachedValue->value();

    if (!isDecoderAvailable() || !m_decoder->isSizeAvailable())
        return defaultValue;

    if (!cachedValue)
        return (*m_decoder.*functor)();

    *cachedValue = (*m_decoder.*functor)();
    didDecodeProperties(m_decoder->bytesDecodedToDetermineProperties());
    return cachedValue->value();
}
499 | |
// Reads a per-frame property via `functor` without triggering any decoding.
// Out-of-range indices fall back to the default (empty) frame.
template<typename T, typename... Args>
T ImageSource::frameMetadataAtIndex(size_t index, T (ImageFrame::*functor)(Args...) const, Args&&... args)
{
    const ImageFrame& frame = index < m_frames.size() ? m_frames[index] : ImageFrame::defaultFrame();
    return (frame.*functor)(std::forward<Args>(args)...);
}
506 | |
// Like frameMetadataAtIndex(), but first ensures the frame's metadata (or
// image) is cached via frameAtIndexCacheIfNeeded(). The result may optionally
// be memoized in `cachedValue`; unavailable metadata is never cached.
template<typename T, typename... Args>
T ImageSource::frameMetadataAtIndexCacheIfNeeded(size_t index, T (ImageFrame::*functor)() const, Optional<T>* cachedValue, Args&&... args)
{
    if (cachedValue && *cachedValue)
        return cachedValue->value();

    const ImageFrame& frame = index < m_frames.size() ? frameAtIndexCacheIfNeeded(index, std::forward<Args>(args)...) : ImageFrame::defaultFrame();

    // Don't cache any unavailable frame metadata.
    if (!frame.hasMetadata() || !cachedValue)
        return (frame.*functor)();

    *cachedValue = (frame.*functor)();
    return cachedValue->value();
}
522 | |
// Cached decoder query: how much of the encoded data has been understood.
EncodedDataStatus ImageSource::encodedDataStatus()
{
    return metadata<EncodedDataStatus, (&ImageDecoder::encodedDataStatus)>(EncodedDataStatus::Unknown, &m_encodedDataStatus);
}
527 | |
// Cached decoder query; defaults to the current frame-cache size (covers the
// decoder-less memory image case).
size_t ImageSource::frameCount()
{
    return metadata<size_t, (&ImageDecoder::frameCount)>(m_frames.size(), &m_frameCount);
}
532 | |
// Cached decoder query: animation repetition count (RepetitionCountNone when unknown).
RepetitionCount ImageSource::repetitionCount()
{
    return metadata<RepetitionCount, (&ImageDecoder::repetitionCount)>(RepetitionCountNone, &m_repetitionCount);
}
537 | |
// Uniform Type Identifier of the image; only meaningful on CG-backed platforms.
String ImageSource::uti()
{
#if USE(CG)
    return metadata<String, (&ImageDecoder::uti)>(String(), &m_uti);
#else
    return String();
#endif
}
546 | |
// Cached decoder query: preferred filename extension for the image format.
String ImageSource::filenameExtension()
{
    return metadata<String, (&ImageDecoder::filenameExtension)>(String(), &m_filenameExtension);
}
551 | |
// Cached decoder query: cursor hot spot, when the format (e.g. .cur) provides one.
Optional<IntPoint> ImageSource::hotSpot()
{
    return metadata<Optional<IntPoint>, (&ImageDecoder::hotSpot)>(WTF::nullopt, &m_hotSpot);
}
556 | |
// Image size in pixels (default subsampling), cached from frame 0's metadata.
IntSize ImageSource::size()
{
#if !USE(CG)
    // It's possible that we have decoded the metadata, but not frame contents yet. In that case ImageDecoder claims to
    // have the size available, but the frame cache is empty. Return the decoder size without caching in such case.
    if (m_frames.isEmpty() && isDecoderAvailable())
        return m_decoder->size();
#endif
    return frameMetadataAtIndexCacheIfNeeded<IntSize>(0, (&ImageFrame::size), &m_size, ImageFrame::Caching::Metadata, SubsamplingLevel::Default);
}
567 | |
// Image size with the EXIF orientation applied, cached from frame 0's metadata.
IntSize ImageSource::sizeRespectingOrientation()
{
    return frameMetadataAtIndexCacheIfNeeded<IntSize>(0, (&ImageFrame::sizeRespectingOrientation), &m_sizeRespectingOrientation, ImageFrame::Caching::Metadata, SubsamplingLevel::Default);
}
572 | |
// Returns the solid color of a 1x1 single-frame image (an invalid Color
// otherwise), so such images can be drawn as a fill instead of a blit.
Color ImageSource::singlePixelSolidColor()
{
    // Cache an invalid Color for images that can't possibly be a single pixel,
    // so the check below short-circuits on subsequent calls.
    if (!m_singlePixelSolidColor && (size() != IntSize(1, 1) || frameCount() != 1))
        m_singlePixelSolidColor = Color();

    if (m_singlePixelSolidColor)
        return m_singlePixelSolidColor.value();

    return frameMetadataAtIndexCacheIfNeeded<Color>(0, (&ImageFrame::singlePixelSolidColor), &m_singlePixelSolidColor, ImageFrame::Caching::MetadataAndImage);
}
583 | |
// Computes (once) the highest subsampling level whose frame area drops below
// the platform threshold, i.e. how aggressively this image may be subsampled.
SubsamplingLevel ImageSource::maximumSubsamplingLevel()
{
    if (m_maximumSubsamplingLevel)
        return m_maximumSubsamplingLevel.value();

    if (!isDecoderAvailable() || !m_decoder->frameAllowSubsamplingAtIndex(0))
        return SubsamplingLevel::Default;

    // FIXME: this value was chosen to be appropriate for iOS since the image
    // subsampling is only enabled by default on iOS. Choose a different value
    // if image subsampling is enabled on other platform.
    const int maximumImageAreaBeforeSubsampling = 5 * 1024 * 1024;
    SubsamplingLevel level = SubsamplingLevel::First;

    for (; level < SubsamplingLevel::Last; ++level) {
        if (frameSizeAtIndex(0, level).area().unsafeGet() < maximumImageAreaBeforeSubsampling)
            break;
    }

    m_maximumSubsamplingLevel = level;
    return m_maximumSubsamplingLevel.value();
}
606 | |
607 | bool ImageSource::frameIsBeingDecodedAndIsCompatibleWithOptionsAtIndex(size_t index, const DecodingOptions& decodingOptions) |
608 | { |
609 | auto it = std::find_if(m_frameCommitQueue.begin(), m_frameCommitQueue.end(), [index, &decodingOptions](const ImageFrameRequest& frameRequest) { |
610 | return frameRequest.index == index && frameRequest.decodingOptions.isAsynchronousCompatibleWith(decodingOptions); |
611 | }); |
612 | return it != m_frameCommitQueue.end(); |
613 | } |
614 | |
// Decoding status of a frame, refreshing the frame's metadata first if needed.
DecodingStatus ImageSource::frameDecodingStatusAtIndex(size_t index)
{
    return frameMetadataAtIndexCacheIfNeeded<DecodingStatus>(index, (&ImageFrame::decodingStatus), nullptr, ImageFrame::Caching::Metadata);
}
619 | |
// Whether the frame has an alpha channel; reads cached metadata only.
bool ImageSource::frameHasAlphaAtIndex(size_t index)
{
    return frameMetadataAtIndex<bool>(index, (&ImageFrame::hasAlpha));
}
624 | |
// Whether a full-size decoded image is cached for the frame at the given subsampling level.
bool ImageSource::frameHasFullSizeNativeImageAtIndex(size_t index, const Optional<SubsamplingLevel>& subsamplingLevel)
{
    return frameMetadataAtIndex<bool>(index, (&ImageFrame::hasFullSizeNativeImage), subsamplingLevel);
}
629 | |
// Whether the frame's cached decoded image is usable for the given decoding options.
bool ImageSource::frameHasDecodedNativeImageCompatibleWithOptionsAtIndex(size_t index, const Optional<SubsamplingLevel>& subsamplingLevel, const DecodingOptions& decodingOptions)
{
    return frameMetadataAtIndex<bool>(index, (&ImageFrame::hasDecodedNativeImageCompatibleWithOptions), subsamplingLevel, decodingOptions);
}
634 | |
// Subsampling level the frame was decoded at; reads cached metadata only.
SubsamplingLevel ImageSource::frameSubsamplingLevelAtIndex(size_t index)
{
    return frameMetadataAtIndex<SubsamplingLevel>(index, (&ImageFrame::subsamplingLevel));
}
639 | |
// Frame size at a subsampling level, caching the frame's metadata if needed.
IntSize ImageSource::frameSizeAtIndex(size_t index, SubsamplingLevel subsamplingLevel)
{
    return frameMetadataAtIndexCacheIfNeeded<IntSize>(index, (&ImageFrame::size), nullptr, ImageFrame::Caching::Metadata, subsamplingLevel);
}
644 | |
// Decoded byte footprint of a frame, caching the frame's metadata if needed.
unsigned ImageSource::frameBytesAtIndex(size_t index, SubsamplingLevel subsamplingLevel)
{
    return frameMetadataAtIndexCacheIfNeeded<unsigned>(index, (&ImageFrame::frameBytes), nullptr, ImageFrame::Caching::Metadata, subsamplingLevel);
}
649 | |
// Animation display duration of a frame, caching the frame's metadata if needed.
Seconds ImageSource::frameDurationAtIndex(size_t index)
{
    return frameMetadataAtIndexCacheIfNeeded<Seconds>(index, (&ImageFrame::duration), nullptr, ImageFrame::Caching::Metadata);
}
654 | |
// EXIF orientation of a frame, caching the frame's metadata if needed.
ImageOrientation ImageSource::frameOrientationAtIndex(size_t index)
{
    return frameMetadataAtIndexCacheIfNeeded<ImageOrientation>(index, (&ImageFrame::orientation), nullptr, ImageFrame::Caching::Metadata);
}
659 | |
660 | #if USE(DIRECT2D) |
// Direct2D only: hands the render target's platform context to the decoder so
// it can create frames compatible with that target.
void ImageSource::setTargetContext(const GraphicsContext* targetContext)
{
    if (isDecoderAvailable() && targetContext)
        m_decoder->setTargetContext(targetContext->platformContext());
}
666 | #endif |
667 | |
668 | NativeImagePtr ImageSource::createFrameImageAtIndex(size_t index, SubsamplingLevel subsamplingLevel) |
669 | { |
670 | return isDecoderAvailable() ? m_decoder->createFrameImageAtIndex(index, subsamplingLevel) : nullptr; |
671 | } |
672 | |
// Cached decoded image of a frame, without triggering any decoding.
NativeImagePtr ImageSource::frameImageAtIndex(size_t index)
{
    return frameMetadataAtIndex<NativeImagePtr>(index, (&ImageFrame::nativeImage));
}
677 | |
// Decoded image of a frame, synchronously decoding and caching it if absent.
NativeImagePtr ImageSource::frameImageAtIndexCacheIfNeeded(size_t index, SubsamplingLevel subsamplingLevel)
{
    return frameMetadataAtIndexCacheIfNeeded<NativeImagePtr>(index, (&ImageFrame::nativeImage), nullptr, ImageFrame::Caching::MetadataAndImage, subsamplingLevel);
}
682 | |
// Writes the image's key properties to a TextStream for layout-test dumps and
// logging; the orientation is emitted only when non-default.
void ImageSource::dump(TextStream& ts)
{
    ts.dumpProperty("type", filenameExtension());
    ts.dumpProperty("frame-count", frameCount());
    ts.dumpProperty("repetitions", repetitionCount());
    ts.dumpProperty("solid-color", singlePixelSolidColor());

    ImageOrientation orientation = frameOrientationAtIndex(0);
    if (orientation != OriginTopLeft)
        ts.dumpProperty("orientation", orientation);
}
694 | |
695 | } |
696 | |