/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SourceBuffer.h"

#if ENABLE(MEDIA_SOURCE)

#include "AudioTrackList.h"
#include "BufferSource.h"
#include "Event.h"
#include "EventNames.h"
#include "GenericEventQueue.h"
#include "HTMLMediaElement.h"
#include "InbandTextTrack.h"
#include "Logging.h"
#include "MediaDescription.h"
#include "MediaSample.h"
#include "MediaSource.h"
#include "SampleMap.h"
#include "SourceBufferList.h"
#include "SourceBufferPrivate.h"
#include "TextTrackList.h"
#include "TimeRanges.h"
#include "VideoTrackList.h"
#include <JavaScriptCore/JSCInlines.h>
#include <JavaScriptCore/JSLock.h>
#include <JavaScriptCore/VM.h>
#include <limits>
#include <wtf/CheckedArithmetic.h>
#include <wtf/IsoMallocInlines.h>

namespace WebCore {

WTF_MAKE_ISO_ALLOCATED_IMPL(SourceBuffer);

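// Weight given to the newest measurement in the exponentially-weighted moving
// average used when smoothing this buffer's buffering-rate estimate: each new
// sample contributes 10%, and older history decays geometrically.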
static const double ExponentialMovingAverageCoefficient = 0.1;

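// Per-track state for the MSE coded frame processing algorithm. `samples` indexes every
// buffered frame in both presentation and decode order, `decodeQueue` holds frames not yet
// handed to the platform for decoding, and the timestamp members mirror the spec's
// per-track-buffer variables (last decode timestamp, need random access point flag, etc.).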
struct SourceBuffer::TrackBuffer {
    MediaTime lastDecodeTimestamp;
    MediaTime greatestDecodeDuration;
    MediaTime lastFrameDuration;
    MediaTime highestPresentationTimestamp;
    MediaTime lastEnqueuedPresentationTime;
    DecodeOrderSampleMap::KeyType lastEnqueuedDecodeKey;
    MediaTime lastEnqueuedDecodeDuration;
    MediaTime roundedTimestampOffset;
    uint32_t lastFrameTimescale { 0 };
    bool needRandomAccessFlag { true };
    bool enabled { false };
    bool needsReenqueueing { false };
    SampleMap samples;
    DecodeOrderSampleMap::MapType decodeQueue;
    RefPtr<MediaDescription> description;
    PlatformTimeRanges buffered;

    TrackBuffer()
        : lastDecodeTimestamp(MediaTime::invalidTime())
        , greatestDecodeDuration(MediaTime::invalidTime())
        , lastFrameDuration(MediaTime::invalidTime())
        , highestPresentationTimestamp(MediaTime::invalidTime())
        , lastEnqueuedPresentationTime(MediaTime::invalidTime())
        , lastEnqueuedDecodeKey({MediaTime::invalidTime(), MediaTime::invalidTime()})
        , lastEnqueuedDecodeDuration(MediaTime::invalidTime())
    {
    }
};

Ref<SourceBuffer> SourceBuffer::create(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
{
    auto sourceBuffer = adoptRef(*new SourceBuffer(WTFMove(sourceBufferPrivate), source));
    sourceBuffer->suspendIfNeeded();
    return sourceBuffer;
}

SourceBuffer::SourceBuffer(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
    : ActiveDOMObject(source->scriptExecutionContext())
    , m_private(WTFMove(sourceBufferPrivate))
    , m_source(source)
    , m_asyncEventQueue(*this)
    , m_appendBufferTimer(*this, &SourceBuffer::appendBufferTimerFired)
    , m_appendWindowStart(MediaTime::zeroTime())
    , m_appendWindowEnd(MediaTime::positiveInfiniteTime())
    , m_groupStartTimestamp(MediaTime::invalidTime())
    , m_groupEndTimestamp(MediaTime::zeroTime())
    , m_buffered(TimeRanges::create())
    , m_appendState(WaitingForSegment)
    , m_timeOfBufferingMonitor(MonotonicTime::now())
    , m_pendingRemoveStart(MediaTime::invalidTime())
    , m_pendingRemoveEnd(MediaTime::invalidTime())
    , m_removeTimer(*this, &SourceBuffer::removeTimerFired)
#if !RELEASE_LOG_DISABLED
    , m_logger(m_private->sourceBufferLogger())
    , m_logIdentifier(m_private->sourceBufferLogIdentifier())
#endif
{
    ASSERT(m_source);
    ALWAYS_LOG(LOGIDENTIFIER);

    m_private->setClient(this);
}

SourceBuffer::~SourceBuffer()
{
    ASSERT(isRemoved());
    ALWAYS_LOG(LOGIDENTIFIER);

    m_private->setClient(nullptr);
}

ExceptionOr<Ref<TimeRanges>> SourceBuffer::buffered() const
{
    // Section 3.1 buffered attribute steps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
    //    InvalidStateError exception and abort these steps.
    if (isRemoved())
        return Exception { InvalidStateError };

    // 2. Return a new static normalized TimeRanges object for the media segments buffered.
    return m_buffered->copy();
}

double SourceBuffer::timestampOffset() const
{
    return m_timestampOffset.toDouble();
}

ExceptionOr<void> SourceBuffer::setTimestampOffset(double offset)
{
    // Section 3.1 timestampOffset attribute setter steps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
    // 1. Let new timestamp offset equal the new value being assigned to this attribute.
    // 2. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an
    //    InvalidStateError exception and abort these steps.
    // 3. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { InvalidStateError };

    // 4. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 4.1 Set the readyState attribute of the parent media source to "open"
    // 4.2 Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 5. If the append state equals PARSING_MEDIA_SEGMENT, then throw an InvalidStateError and abort these steps.
    if (m_appendState == ParsingMediaSegment)
        return Exception { InvalidStateError };

    MediaTime newTimestampOffset = MediaTime::createWithDouble(offset);

    // 6. If the mode attribute equals "sequence", then set the group start timestamp to new timestamp offset.
    if (m_mode == AppendMode::Sequence)
        m_groupStartTimestamp = newTimestampOffset;

    // 7. Update the attribute to the new value.
    m_timestampOffset = newTimestampOffset;

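    // NOTE: The rounded offset below is cached against each track's last seen timescale;
    // clearing both here forces the next appended sample to re-round the new offset.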
    for (auto& trackBuffer : m_trackBufferMap.values()) {
        trackBuffer.lastFrameTimescale = 0;
        trackBuffer.roundedTimestampOffset = MediaTime::invalidTime();
    }

    return { };
}

double SourceBuffer::appendWindowStart() const
{
    return m_appendWindowStart.toDouble();
}

ExceptionOr<void> SourceBuffer::setAppendWindowStart(double newValue)
{
    // Section 3.1 appendWindowStart attribute setter steps.
    // W3C Editor's Draft 16 September 2016
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowstart
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
    //    then throw an InvalidStateError exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { InvalidStateError };

    // 3. If the new value is less than 0 or greater than or equal to appendWindowEnd then
    //    throw a TypeError exception and abort these steps.
    if (newValue < 0 || newValue >= m_appendWindowEnd.toDouble())
        return Exception { TypeError };

    // 4. Update the attribute to the new value.
    m_appendWindowStart = MediaTime::createWithDouble(newValue);

    return { };
}

double SourceBuffer::appendWindowEnd() const
{
    return m_appendWindowEnd.toDouble();
}

ExceptionOr<void> SourceBuffer::setAppendWindowEnd(double newValue)
{
    // Section 3.1 appendWindowEnd attribute setter steps.
    // W3C Editor's Draft 16 September 2016
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowend
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
    //    then throw an InvalidStateError exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { InvalidStateError };

    // 3. If the new value equals NaN, then throw a TypeError and abort these steps.
    // 4. If the new value is less than or equal to appendWindowStart then throw a TypeError exception
    //    and abort these steps.
    if (std::isnan(newValue) || newValue <= m_appendWindowStart.toDouble())
        return Exception { TypeError };

    // 5. Update the attribute to the new value.
    m_appendWindowEnd = MediaTime::createWithDouble(newValue);

    return { };
}

ExceptionOr<void> SourceBuffer::appendBuffer(const BufferSource& data)
{
    return appendBufferInternal(static_cast<const unsigned char*>(data.data()), data.length());
}

void SourceBuffer::resetParserState()
{
    // Section 3.5.2 Reset Parser State algorithm steps.
    // http://www.w3.org/TR/2014/CR-media-source-20140717/#sourcebuffer-reset-parser-state
    // 1. If the append state equals PARSING_MEDIA_SEGMENT and the input buffer contains some complete coded frames,
    //    then run the coded frame processing algorithm until all of these complete coded frames have been processed.
    // FIXME: If any implementation will work in pulling mode (instead of an asynchronous push-and-forget to
    // SourceBufferPrivate), this should be handled somehow, either here or in m_private->abort().

    // 2. Unset the last decode timestamp on all track buffers.
    // 3. Unset the last frame duration on all track buffers.
    // 4. Unset the highest presentation timestamp on all track buffers.
    // 5. Set the need random access point flag on all track buffers to true.
    for (auto& trackBufferPair : m_trackBufferMap.values()) {
        trackBufferPair.lastDecodeTimestamp = MediaTime::invalidTime();
        trackBufferPair.greatestDecodeDuration = MediaTime::invalidTime();
        trackBufferPair.lastFrameDuration = MediaTime::invalidTime();
        trackBufferPair.highestPresentationTimestamp = MediaTime::invalidTime();
        trackBufferPair.needRandomAccessFlag = true;
    }
    // 6. Remove all bytes from the input buffer.
    // Note: this is handled by abortIfUpdating().
    // 7. Set append state to WAITING_FOR_SEGMENT.
    m_appendState = WaitingForSegment;

    m_private->resetParserState();
}

ExceptionOr<void> SourceBuffer::abort()
{
    // Section 3.2 abort() method steps.
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source
    //    then throw an InvalidStateError exception and abort these steps.
    // 2. If the readyState attribute of the parent media source is not in the "open" state
    //    then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || !m_source->isOpen())
        return Exception { InvalidStateError };

    // 3. If the range removal algorithm is running, then throw an InvalidStateError exception and abort these steps.
    if (m_removeTimer.isActive())
        return Exception { InvalidStateError };

    // 4. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
    abortIfUpdating();

    // 5. Run the reset parser state algorithm.
    resetParserState();

    // 6. Set appendWindowStart to the presentation start time.
    m_appendWindowStart = MediaTime::zeroTime();

    // 7. Set appendWindowEnd to positive Infinity.
    m_appendWindowEnd = MediaTime::positiveInfiniteTime();

    return { };
}

ExceptionOr<void> SourceBuffer::remove(double start, double end)
{
    return remove(MediaTime::createWithDouble(start), MediaTime::createWithDouble(end));
}

ExceptionOr<void> SourceBuffer::remove(const MediaTime& start, const MediaTime& end)
{
    DEBUG_LOG(LOGIDENTIFIER, "start = ", start, ", end = ", end);

    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-remove
    // Section 3.2 remove() method steps.
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw
    //    an InvalidStateError exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { InvalidStateError };

    // 3. If duration equals NaN, then throw a TypeError exception and abort these steps.
    // 4. If start is negative or greater than duration, then throw a TypeError exception and abort these steps.
    // 5. If end is less than or equal to start or end equals NaN, then throw a TypeError exception and abort these steps.
    if (m_source->duration().isInvalid()
        || end.isInvalid()
        || start.isInvalid()
        || start < MediaTime::zeroTime()
        || start > m_source->duration()
        || end <= start) {
        return Exception { TypeError };
    }

    // 6. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 6.1. Set the readyState attribute of the parent media source to "open"
    // 6.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 7. Run the range removal algorithm with start and end as the start and end of the removal range.
    rangeRemoval(start, end);

    return { };
}

void SourceBuffer::rangeRemoval(const MediaTime& start, const MediaTime& end)
{
    // 3.5.7 Range Removal
    // https://rawgit.com/w3c/media-source/7bbe4aa33c61ec025bc7acbd80354110f6a000f9/media-source.html#sourcebuffer-range-removal
    // 1. Let start equal the starting presentation timestamp for the removal range.
    // 2. Let end equal the end presentation timestamp for the removal range.
    // 3. Set the updating attribute to true.
    m_updating = true;

    // 4. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
    scheduleEvent(eventNames().updatestartEvent);

    // 5. Return control to the caller and run the rest of the steps asynchronously.
    m_pendingRemoveStart = start;
    m_pendingRemoveEnd = end;
    m_removeTimer.startOneShot(0_s);
}

ExceptionOr<void> SourceBuffer::changeType(const String& type)
{
    // changeType() proposed API. See issue #155: <https://github.com/w3c/media-source/issues/155>
    // https://rawgit.com/wicg/media-source/codec-switching/index.html#dom-sourcebuffer-changetype

    // 1. If type is an empty string then throw a TypeError exception and abort these steps.
    if (type.isEmpty())
        return Exception { TypeError };

    // 2. If this object has been removed from the sourceBuffers attribute of the parent media source,
    //    then throw an InvalidStateError exception and abort these steps.
    // 3. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { InvalidStateError };

    // 4. If type contains a MIME type that is not supported or contains a MIME type that is not supported with
    //    the types specified (currently or previously) of SourceBuffer objects in the sourceBuffers attribute of
    //    the parent media source, then throw a NotSupportedError exception and abort these steps.
    ContentType contentType(type);
    if (!m_private->canSwitchToType(contentType))
        return Exception { NotSupportedError };

    // 5. If the readyState attribute of the parent media source is in the "ended" state then run the following
    //    steps:
    // 5.1. Set the readyState attribute of the parent media source to "open"
    // 5.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 6. Run the reset parser state algorithm.
    resetParserState();

    // 7. Update the generate timestamps flag on this SourceBuffer object to the value in the "Generate Timestamps
    //    Flag" column of the byte stream format registry [MSE-REGISTRY] entry that is associated with type.
    setShouldGenerateTimestamps(MediaSource::contentTypeShouldGenerateTimestamps(contentType));

    // 8. ↳ If the generate timestamps flag equals true:
    //    Set the mode attribute on this SourceBuffer object to "sequence", including running the associated steps
    //    for that attribute being set.
    if (m_shouldGenerateTimestamps)
        setMode(AppendMode::Sequence);

    //    ↳ Otherwise:
    //    Keep the previous value of the mode attribute on this SourceBuffer object, without running any associated
    //    steps for that attribute being set.
    //    NOTE: No-op.

    // 9. Set pending initialization segment for changeType flag to true.
    m_pendingInitializationSegmentForChangeType = true;

    return { };
}

void SourceBuffer::abortIfUpdating()
{
    // Section 3.2 abort() method step 4 substeps.
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort

    if (!m_updating)
        return;

    // 4.1. Abort the buffer append algorithm if it is running.
    m_appendBufferTimer.stop();
    m_pendingAppendData.clear();
    m_private->abort();

    // 4.2. Set the updating attribute to false.
    m_updating = false;

    // 4.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
    scheduleEvent(eventNames().abortEvent);

    // 4.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);
}

MediaTime SourceBuffer::highestPresentationTimestamp() const
{
    MediaTime highestTime;
    for (auto& trackBuffer : m_trackBufferMap.values()) {
        auto lastSampleIter = trackBuffer.samples.presentationOrder().rbegin();
        if (lastSampleIter == trackBuffer.samples.presentationOrder().rend())
            continue;
        highestTime = std::max(highestTime, lastSampleIter->first);
    }
    return highestTime;
}

void SourceBuffer::readyStateChanged()
{
    updateBufferedFromTrackBuffers();
}

void SourceBuffer::removedFromMediaSource()
{
    if (isRemoved())
        return;

    abortIfUpdating();

    for (auto& trackBufferPair : m_trackBufferMap.values()) {
        trackBufferPair.samples.clear();
        trackBufferPair.decodeQueue.clear();
    }

    m_private->removedFromMediaSource();
    m_source = nullptr;
}

void SourceBuffer::seekToTime(const MediaTime& time)
{
    ALWAYS_LOG(LOGIDENTIFIER, time);

    for (auto& trackBufferPair : m_trackBufferMap) {
        TrackBuffer& trackBuffer = trackBufferPair.value;
        const AtomicString& trackID = trackBufferPair.key;

        trackBuffer.needsReenqueueing = true;
        reenqueueMediaForTime(trackBuffer, trackID, time);
    }
}

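// Returns the sync-sample time closest to targetTime within each track, then picks the one
// farthest from targetTime across tracks, so that every track can restart from a sync sample
// inside [targetTime - negativeThreshold, targetTime + positiveThreshold]. For example
// (illustrative times): with a 10s target and a track whose nearest sync samples sit at 8s
// and 11s, that track contributes 11s, since |10 - 11| < |10 - 8|.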
MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
{
    MediaTime seekTime = targetTime;
    MediaTime lowerBoundTime = targetTime - negativeThreshold;
    MediaTime upperBoundTime = targetTime + positiveThreshold;

    for (auto& trackBuffer : m_trackBufferMap.values()) {
        // Find the sample which contains the target time.
        auto futureSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
        auto pastSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
        auto upperBound = trackBuffer.samples.decodeOrder().end();
        auto lowerBound = trackBuffer.samples.decodeOrder().rend();

        if (futureSyncSampleIterator == upperBound && pastSyncSampleIterator == lowerBound)
            continue;

        MediaTime futureSeekTime = MediaTime::positiveInfiniteTime();
        if (futureSyncSampleIterator != upperBound) {
            RefPtr<MediaSample>& sample = futureSyncSampleIterator->second;
            futureSeekTime = sample->presentationTime();
        }

        MediaTime pastSeekTime = MediaTime::negativeInfiniteTime();
        if (pastSyncSampleIterator != lowerBound) {
            RefPtr<MediaSample>& sample = pastSyncSampleIterator->second;
            pastSeekTime = sample->presentationTime();
        }

        MediaTime trackSeekTime = abs(targetTime - futureSeekTime) < abs(targetTime - pastSeekTime) ? futureSeekTime : pastSeekTime;
        if (abs(targetTime - trackSeekTime) > abs(targetTime - seekTime))
            seekTime = trackSeekTime;
    }

    return seekTime;
}

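// Report pending activity while still attached to a MediaSource or while events remain
// queued, so this wrapper is not garbage collected out from under a live media pipeline.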
bool SourceBuffer::hasPendingActivity() const
{
    return m_source || m_asyncEventQueue.hasPendingEvents();
}

void SourceBuffer::suspend(ReasonForSuspension reason)
{
    switch (reason) {
    case ReasonForSuspension::PageCache:
    case ReasonForSuspension::PageWillBeSuspended:
        m_asyncEventQueue.suspend();
        break;
    case ReasonForSuspension::JavaScriptDebuggerPaused:
    case ReasonForSuspension::WillDeferLoading:
        // Do nothing, we don't pause media playback in these cases.
        break;
    }
}

void SourceBuffer::resume()
{
    m_asyncEventQueue.resume();
}

void SourceBuffer::stop()
{
    m_asyncEventQueue.close();
    m_appendBufferTimer.stop();
    m_removeTimer.stop();
}

bool SourceBuffer::canSuspendForDocumentSuspension() const
{
    return !hasPendingActivity();
}

const char* SourceBuffer::activeDOMObjectName() const
{
    return "SourceBuffer";
}

bool SourceBuffer::isRemoved() const
{
    return !m_source;
}

void SourceBuffer::scheduleEvent(const AtomicString& eventName)
{
    auto event = Event::create(eventName, Event::CanBubble::No, Event::IsCancelable::No);
    event->setTarget(this);

    m_asyncEventQueue.enqueueEvent(WTFMove(event));
}

ExceptionOr<void> SourceBuffer::appendBufferInternal(const unsigned char* data, unsigned size)
{
    // Section 3.2 appendBuffer()
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data

    // Step 1 is enforced by the caller.
    // 2. Run the prepare append algorithm.
    // Section 3.5.4 Prepare Append Algorithm

    // 1. If the SourceBuffer has been removed from the sourceBuffers attribute of the parent media source
    //    then throw an InvalidStateError exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { InvalidStateError };

    // 3. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 3.1. Set the readyState attribute of the parent media source to "open"
    // 3.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 4. Run the coded frame eviction algorithm.
    evictCodedFrames(size);

    // FIXME: enable this code when MSE libraries have been updated to support it.
#if USE(GSTREAMER)
    // 5. If the buffer full flag equals true, then throw a QuotaExceededError exception and abort these steps.
    if (m_bufferFull) {
        ERROR_LOG(LOGIDENTIFIER, "buffer full, failing with QuotaExceededError error");
        return Exception { QuotaExceededError };
    }
#endif

    // NOTE: Return to 3.2 appendBuffer()
    // 3. Add data to the end of the input buffer.
    m_pendingAppendData.append(data, size);

    // 4. Set the updating attribute to true.
    m_updating = true;

    // 5. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
    scheduleEvent(eventNames().updatestartEvent);

    // 6. Asynchronously run the buffer append algorithm.
    m_appendBufferTimer.startOneShot(0_s);

    reportExtraMemoryAllocated();

    return { };
}

void SourceBuffer::appendBufferTimerFired()
{
    if (isRemoved())
        return;

    ASSERT(m_updating);

    // Section 3.5.5 Buffer Append Algorithm
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append

    // 1. Run the segment parser loop algorithm.

    // Section 3.5.1 Segment Parser Loop
    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-segment-parser-loop
    // When the segment parser loop algorithm is invoked, run the following steps:

    // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
    if (!m_pendingAppendData.size()) {
        sourceBufferPrivateAppendComplete(AppendSucceeded);
        return;
    }

    // Manually clear out the m_pendingAppendData Vector, in case the platform implementation
    // rejects appending the buffer for whatever reason.
    // FIXME: The implementation should guarantee the move from this Vector, and we should
    // assert here to confirm that. See https://bugs.webkit.org/show_bug.cgi?id=178003.
    m_private->append(WTFMove(m_pendingAppendData));
    m_pendingAppendData.clear();
}

void SourceBuffer::sourceBufferPrivateAppendComplete(AppendResult result)
{
    if (isRemoved())
        return;

    // Resolve the changes in TrackBuffers' buffered ranges
    // into the SourceBuffer's buffered ranges.
    updateBufferedFromTrackBuffers();

    // Section 3.5.5 Buffer Append Algorithm, ctd.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append

    // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
    //    then run the append error algorithm with the decode error parameter set to true and abort this algorithm.
    if (result == ParsingFailed) {
        ERROR_LOG(LOGIDENTIFIER, "ParsingFailed");
        appendError(true);
        return;
    }

    // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
    // sourceBufferPrivateDidReceiveSample() below.

    // 7. Need more data: Return control to the calling algorithm.

    // NOTE: return to Section 3.5.5
    // 2. If the segment parser loop algorithm in the previous step was aborted, then abort this algorithm.
    if (result != AppendSucceeded)
        return;

    // 3. Set the updating attribute to false.
    m_updating = false;

    // 4. Queue a task to fire a simple event named update at this SourceBuffer object.
    scheduleEvent(eventNames().updateEvent);

    // 5. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);

    if (m_source)
        m_source->monitorSourceBuffers();

    MediaTime currentMediaTime = m_source->currentTime();
    for (auto& trackBufferPair : m_trackBufferMap) {
        TrackBuffer& trackBuffer = trackBufferPair.value;
        const AtomicString& trackID = trackBufferPair.key;

        if (trackBuffer.needsReenqueueing) {
            DEBUG_LOG(LOGIDENTIFIER, "reenqueuing at time ", currentMediaTime);
            reenqueueMediaForTime(trackBuffer, trackID, currentMediaTime);
        } else
            provideMediaData(trackBuffer, trackID);
    }

    reportExtraMemoryAllocated();
    if (extraMemoryCost() > this->maximumBufferSize())
        m_bufferFull = true;

    DEBUG_LOG(LOGIDENTIFIER);
}

void SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(int error)
{
#if RELEASE_LOG_DISABLED
    UNUSED_PARAM(error);
#endif

    ERROR_LOG(LOGIDENTIFIER, error);

    if (!isRemoved())
        m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
}

static bool decodeTimeComparator(const PresentationOrderSampleMap::MapType::value_type& a, const PresentationOrderSampleMap::MapType::value_type& b)
{
    return a.second->decodeTime() < b.second->decodeTime();
}

static PlatformTimeRanges removeSamplesFromTrackBuffer(const DecodeOrderSampleMap::MapType& samples, SourceBuffer::TrackBuffer& trackBuffer, const SourceBuffer* buffer, const char* logPrefix)
{
#if !RELEASE_LOG_DISABLED
    MediaTime earliestSample = MediaTime::positiveInfiniteTime();
    MediaTime latestSample = MediaTime::zeroTime();
    size_t bytesRemoved = 0;
    auto logIdentifier = WTF::Logger::LogSiteIdentifier(buffer->logClassName(), logPrefix, buffer->logIdentifier());
    auto& logger = buffer->logger();
    auto willLog = logger.willLog(buffer->logChannel(), WTFLogLevel::Debug);
#else
    UNUSED_PARAM(logPrefix);
    UNUSED_PARAM(buffer);
#endif

    PlatformTimeRanges erasedRanges;
    for (const auto& sampleIt : samples) {
        const DecodeOrderSampleMap::KeyType& decodeKey = sampleIt.first;
#if !RELEASE_LOG_DISABLED
        size_t startBufferSize = trackBuffer.samples.sizeInBytes();
#endif

        const RefPtr<MediaSample>& sample = sampleIt.second;

#if !RELEASE_LOG_DISABLED
        if (willLog)
            logger.debug(buffer->logChannel(), logIdentifier, "removing sample ", *sampleIt.second);
#endif

        // Remove the erased samples from the TrackBuffer sample map.
        trackBuffer.samples.removeSample(sample.get());

        // Also remove the erased samples from the TrackBuffer decodeQueue.
        trackBuffer.decodeQueue.erase(decodeKey);

        auto startTime = sample->presentationTime();
        auto endTime = startTime + sample->duration();
        erasedRanges.add(startTime, endTime);

#if !RELEASE_LOG_DISABLED
        bytesRemoved += startBufferSize - trackBuffer.samples.sizeInBytes();
        if (startTime < earliestSample)
            earliestSample = startTime;
        if (endTime > latestSample)
            latestSample = endTime;
#endif
    }

    // Because we may have added artificial padding in the buffered ranges when adding samples, we may
    // need to remove that padding when removing those same samples. Walk over the erased ranges looking
    // for unbuffered areas and expand erasedRanges to encompass those areas.
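    // For instance (illustrative times): if a removed sample spanned [4.0, 4.96) but `buffered`
    // had been padded up to the next sample at 5.0, extend the erased range to 5.0 so no stale
    // sliver of "buffered" time survives the removal.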
    PlatformTimeRanges additionalErasedRanges;
    for (unsigned i = 0; i < erasedRanges.length(); ++i) {
        auto erasedStart = erasedRanges.start(i);
        auto erasedEnd = erasedRanges.end(i);
        auto startIterator = trackBuffer.samples.presentationOrder().reverseFindSampleBeforePresentationTime(erasedStart);
        if (startIterator == trackBuffer.samples.presentationOrder().rend())
            additionalErasedRanges.add(MediaTime::zeroTime(), erasedStart);
        else {
            auto& previousSample = *startIterator->second;
            if (previousSample.presentationTime() + previousSample.duration() < erasedStart)
                additionalErasedRanges.add(previousSample.presentationTime() + previousSample.duration(), erasedStart);
        }

        auto endIterator = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(erasedEnd);
        if (endIterator == trackBuffer.samples.presentationOrder().end())
            additionalErasedRanges.add(erasedEnd, MediaTime::positiveInfiniteTime());
        else {
            auto& nextSample = *endIterator->second;
            if (nextSample.presentationTime() > erasedEnd)
                additionalErasedRanges.add(erasedEnd, nextSample.presentationTime());
        }
    }
    if (additionalErasedRanges.length())
        erasedRanges.unionWith(additionalErasedRanges);

#if !RELEASE_LOG_DISABLED
    if (bytesRemoved && willLog)
        logger.debug(buffer->logChannel(), logIdentifier, "removed ", bytesRemoved, ", start = ", earliestSample, ", end = ", latestSample);
#endif

    return erasedRanges;
}

void SourceBuffer::removeCodedFrames(const MediaTime& start, const MediaTime& end)
{
    DEBUG_LOG(LOGIDENTIFIER, "start = ", start, ", end = ", end);

    // 3.5.9 Coded Frame Removal Algorithm
    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-coded-frame-removal

    // 1. Let start be the starting presentation timestamp for the removal range.
    MediaTime durationMediaTime = m_source->duration();
    MediaTime currentMediaTime = m_source->currentTime();

    // 2. Let end be the end presentation timestamp for the removal range.
    // 3. For each track buffer in this source buffer, run the following steps:
    for (auto& trackBuffer : m_trackBufferMap.values()) {
        // 3.1. Let remove end timestamp be the current value of duration.
        // 3.2. If this track buffer has a random access point timestamp that is greater than or equal to end, then update
        //      remove end timestamp to that random access point timestamp.
        // NOTE: Step 3.2 will be incorrect for any random access point timestamp whose decode time is later than the sample at end,
        // but whose presentation time is less than the sample at end. Skip this step until step 3.3 below.

        // NOTE: To handle MediaSamples which may be an amalgamation of multiple shorter samples, find samples whose presentation
        // interval straddles the start and end times, and divide them if possible:
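        // For example (illustrative times): removing [2s, 4s) from a track holding one divisible
        // sample spanning [0s, 6s) first splits it at 2s and again at 4s, so only [2s, 4s) is
        // dropped while [0s, 2s) and [4s, 6s) survive. Splitting only happens when divide()
        // yields two valid replacement samples.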
        auto divideSampleIfPossibleAtPresentationTime = [&] (const MediaTime& time) {
            auto sampleIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
            if (sampleIterator == trackBuffer.samples.presentationOrder().end())
                return;
            RefPtr<MediaSample> sample = sampleIterator->second;
            if (!sample->isDivisable())
                return;
            std::pair<RefPtr<MediaSample>, RefPtr<MediaSample>> replacementSamples = sample->divide(time);
            if (!replacementSamples.first || !replacementSamples.second)
                return;
            DEBUG_LOG(LOGIDENTIFIER, "splitting sample ", *sample, " into ", *replacementSamples.first, " and ", *replacementSamples.second);
            trackBuffer.samples.removeSample(sample.get());
            trackBuffer.samples.addSample(*replacementSamples.first);
            trackBuffer.samples.addSample(*replacementSamples.second);
        };
        divideSampleIfPossibleAtPresentationTime(start);
        divideSampleIfPossibleAtPresentationTime(end);

        auto removePresentationStart = trackBuffer.samples.presentationOrder().findSampleContainingOrAfterPresentationTime(start);
        auto removePresentationEnd = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(end);
        if (removePresentationStart == removePresentationEnd)
            continue;

        // 3.3. Remove all media data, from this track buffer, that contain starting timestamps greater than or equal to
        //      start and less than the remove end timestamp.
        // NOTE: frames must be removed in decode order, so that all dependent frames between the frame to be removed
        // and the next sync sample frame are removed. But we must start from the first sample in decode order, not
        // presentation order.
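        // In other words, the erased span is extended in decode order up to (but not including)
        // the next sync sample, since frames after the removed ones cannot be decoded once their
        // dependencies are gone.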
        auto minmaxDecodeTimeIterPair = std::minmax_element(removePresentationStart, removePresentationEnd, decodeTimeComparator);
        auto& firstSample = *minmaxDecodeTimeIterPair.first->second;
        auto& lastSample = *minmaxDecodeTimeIterPair.second->second;
        auto removeDecodeStart = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey({firstSample.decodeTime(), firstSample.presentationTime()});
        auto removeDecodeLast = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey({lastSample.decodeTime(), lastSample.presentationTime()});
        auto removeDecodeEnd = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(removeDecodeLast);

        DecodeOrderSampleMap::MapType erasedSamples(removeDecodeStart, removeDecodeEnd);
        PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(erasedSamples, trackBuffer, this, "removeCodedFrames");

        // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
        // not yet displayed samples.
        if (trackBuffer.lastEnqueuedPresentationTime.isValid() && currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
            PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
            possiblyEnqueuedRanges.intersectWith(erasedRanges);
            if (possiblyEnqueuedRanges.length())
                trackBuffer.needsReenqueueing = true;
        }

        erasedRanges.invert();
        trackBuffer.buffered.intersectWith(erasedRanges);
        setBufferedDirty(true);

        // 3.4. If this object is in activeSourceBuffers, the current playback position is greater than or equal to start
        //      and less than the remove end timestamp, and HTMLMediaElement.readyState is greater than HAVE_METADATA, then set
        //      the HTMLMediaElement.readyState attribute to HAVE_METADATA and stall playback.
        if (m_active && currentMediaTime >= start && currentMediaTime < end && m_private->readyState() > MediaPlayer::HaveMetadata)
            m_private->setReadyState(MediaPlayer::HaveMetadata);
    }

    updateBufferedFromTrackBuffers();

    // 4. If buffer full flag equals true and this object is ready to accept more bytes, then set the buffer full flag to false.
    // No-op.

    LOG(Media, "SourceBuffer::removeCodedFrames(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
}

void SourceBuffer::removeTimerFired()
{
    if (isRemoved())
        return;

    ASSERT(m_updating);
    ASSERT(m_pendingRemoveStart.isValid());
    ASSERT(m_pendingRemoveStart < m_pendingRemoveEnd);

    // Section 3.5.7 Range Removal
    // http://w3c.github.io/media-source/#sourcebuffer-range-removal

    // 6. Run the coded frame removal algorithm with start and end as the start and end of the removal range.
    removeCodedFrames(m_pendingRemoveStart, m_pendingRemoveEnd);

    // 7. Set the updating attribute to false.
    m_updating = false;
    m_pendingRemoveStart = MediaTime::invalidTime();
    m_pendingRemoveEnd = MediaTime::invalidTime();

    // 8. Queue a task to fire a simple event named update at this SourceBuffer object.
    scheduleEvent(eventNames().updateEvent);

    // 9. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);
}

void SourceBuffer::evictCodedFrames(size_t newDataSize)
{
    // 3.5.13 Coded Frame Eviction Algorithm
    // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-eviction

    if (isRemoved())
        return;

    // This algorithm is run to free up space in this source buffer when new data is appended.
    // 1. Let new data equal the data that is about to be appended to this SourceBuffer.
    // 2. If the buffer full flag equals false, then abort these steps.
    if (!m_bufferFull)
        return;

    size_t maximumBufferSize = this->maximumBufferSize();

    // 3. Let removal ranges equal a list of presentation time ranges that can be evicted from
    //    the presentation to make room for the new data.

    // NOTE: begin by removing data from the beginning of the buffered ranges, 30 seconds at
    // a time, up to 30 seconds before currentTime.
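    // For example, with currentTime at 100s this sweeps [0, 30), then [30, 60), then the
    // clamped [60, 70), re-checking after each pass whether enough space has been freed.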
    MediaTime thirtySeconds = MediaTime(30, 1);
    MediaTime currentTime = m_source->currentTime();
    MediaTime maximumRangeEnd = currentTime - thirtySeconds;

#if !RELEASE_LOG_DISABLED
    DEBUG_LOG(LOGIDENTIFIER, "currentTime = ", m_source->currentTime(), ", require ", extraMemoryCost() + newDataSize, " bytes, maximum buffer size is ", maximumBufferSize);
    size_t initialBufferedSize = extraMemoryCost();
#endif

    MediaTime rangeStart = MediaTime::zeroTime();
    MediaTime rangeEnd = rangeStart + thirtySeconds;
    while (rangeStart < maximumRangeEnd) {
        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
        //    end equal to the removal range start and end timestamp respectively.
        removeCodedFrames(rangeStart, std::min(rangeEnd, maximumRangeEnd));
        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
            m_bufferFull = false;
            break;
        }

        rangeStart += thirtySeconds;
        rangeEnd += thirtySeconds;
    }

#if !RELEASE_LOG_DISABLED
    if (!m_bufferFull) {
        DEBUG_LOG(LOGIDENTIFIER, "evicted ", initialBufferedSize - extraMemoryCost());
        return;
    }
#endif

    // If there still isn't enough free space and there are buffered time ranges after the current range (i.e. there is
    // a gap after the current buffered range), delete 30 seconds at a time from the duration back toward the current
    // time range, or until 30 seconds after currentTime, whichever we hit first.
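    // For example, with a 300s duration and currentTime at 100s, this sweeps [270, 300),
    // then [240, 270), and so on, never removing anything earlier than 130s.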
    auto buffered = m_buffered->ranges();
    size_t currentTimeRange = buffered.find(currentTime);
    if (currentTimeRange == notFound || currentTimeRange == buffered.length() - 1) {
#if !RELEASE_LOG_DISABLED
        ERROR_LOG(LOGIDENTIFIER, "FAILED to free enough after evicting ", initialBufferedSize - extraMemoryCost());
#endif
        return;
    }

    MediaTime minimumRangeStart = currentTime + thirtySeconds;

    rangeEnd = m_source->duration();
    rangeStart = rangeEnd - thirtySeconds;
    while (rangeStart > minimumRangeStart) {
        // Do not evict data from the time range that contains currentTime.
        size_t startTimeRange = buffered.find(rangeStart);
        if (startTimeRange == currentTimeRange) {
            size_t endTimeRange = buffered.find(rangeEnd);
            if (endTimeRange == currentTimeRange)
                break;

            rangeEnd = buffered.start(endTimeRange);
        }

        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
        //    end equal to the removal range start and end timestamp respectively.
        removeCodedFrames(std::max(minimumRangeStart, rangeStart), rangeEnd);
        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
            m_bufferFull = false;
            break;
        }

        rangeStart -= thirtySeconds;
        rangeEnd -= thirtySeconds;
    }

#if !RELEASE_LOG_DISABLED
    if (m_bufferFull)
        ERROR_LOG(LOGIDENTIFIER, "FAILED to free enough after evicting ", initialBufferedSize - extraMemoryCost());
    else
        DEBUG_LOG(LOGIDENTIFIER, "evicted ", initialBufferedSize - extraMemoryCost());
#endif
}

size_t SourceBuffer::maximumBufferSize() const
{
    if (isRemoved())
        return 0;

    auto* element = m_source->mediaElement();
    if (!element)
        return 0;

    return element->maximumSourceBufferSize(*this);
}

VideoTrackList& SourceBuffer::videoTracks()
{
    if (!m_videoTracks)
        m_videoTracks = VideoTrackList::create(m_source->mediaElement(), scriptExecutionContext());
    return *m_videoTracks;
}

AudioTrackList& SourceBuffer::audioTracks()
{
    if (!m_audioTracks)
        m_audioTracks = AudioTrackList::create(m_source->mediaElement(), scriptExecutionContext());
    return *m_audioTracks;
}

TextTrackList& SourceBuffer::textTracks()
{
    if (!m_textTracks)
        m_textTracks = TextTrackList::create(m_source->mediaElement(), scriptExecutionContext());
    return *m_textTracks;
}

void SourceBuffer::setActive(bool active)
{
    if (m_active == active)
        return;

    m_active = active;
    m_private->setActive(active);
    if (!isRemoved())
        m_source->sourceBufferDidChangeActiveState(*this, active);
}
| 1080 | |
| 1081 | void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(const InitializationSegment& segment) |
| 1082 | { |
| 1083 | if (isRemoved()) |
| 1084 | return; |
| 1085 | |
| 1086 | ALWAYS_LOG(LOGIDENTIFIER); |
| 1087 | |
| 1088 | // 3.5.8 Initialization Segment Received (ctd) |
| 1089 | // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015] |
| 1090 | |
| 1091 | // 1. Update the duration attribute if it currently equals NaN: |
| 1092 | if (m_source->duration().isInvalid()) { |
| 1093 | // ↳ If the initialization segment contains a duration: |
| 1094 | // Run the duration change algorithm with new duration set to the duration in the initialization segment. |
| 1095 | // ↳ Otherwise: |
| 1096 | // Run the duration change algorithm with new duration set to positive Infinity. |
| 1097 | if (segment.duration.isValid() && !segment.duration.isIndefinite()) |
| 1098 | m_source->setDurationInternal(segment.duration); |
| 1099 | else |
| 1100 | m_source->setDurationInternal(MediaTime::positiveInfiniteTime()); |
| 1101 | } |
| 1102 | |
| 1103 | // 2. If the initialization segment has no audio, video, or text tracks, then run the append error algorithm |
| 1104 | // with the decode error parameter set to true and abort these steps. |
| 1105 | if (segment.audioTracks.isEmpty() && segment.videoTracks.isEmpty() && segment.textTracks.isEmpty()) { |
| 1106 | appendError(true); |
| 1107 | return; |
| 1108 | } |
| 1109 | |
| 1110 | // 3. If the first initialization segment flag is true, then run the following steps: |
| 1111 | if (m_receivedFirstInitializationSegment) { |
| 1112 | |
| 1113 | // 3.1. Verify the following properties. If any of the checks fail then run the append error algorithm |
| 1114 | // with the decode error parameter set to true and abort these steps. |
| 1115 | if (!validateInitializationSegment(segment)) { |
| 1116 | appendError(true); |
| 1117 | return; |
| 1118 | } |
| 1119 | // 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers. |
| 1120 | ASSERT(segment.audioTracks.size() == audioTracks().length()); |
| 1121 | for (auto& audioTrackInfo : segment.audioTracks) { |
| 1122 | if (audioTracks().length() == 1) { |
| 1123 | audioTracks().item(0)->setPrivate(*audioTrackInfo.track); |
| 1124 | break; |
| 1125 | } |
| 1126 | |
| 1127 | auto audioTrack = audioTracks().getTrackById(audioTrackInfo.track->id()); |
| 1128 | ASSERT(audioTrack); |
| 1129 | audioTrack->setPrivate(*audioTrackInfo.track); |
| 1130 | } |
| 1131 | |
| 1132 | ASSERT(segment.videoTracks.size() == videoTracks().length()); |
| 1133 | for (auto& videoTrackInfo : segment.videoTracks) { |
| 1134 | if (videoTracks().length() == 1) { |
| 1135 | videoTracks().item(0)->setPrivate(*videoTrackInfo.track); |
| 1136 | break; |
| 1137 | } |
| 1138 | |
| 1139 | auto videoTrack = videoTracks().getTrackById(videoTrackInfo.track->id()); |
| 1140 | ASSERT(videoTrack); |
| 1141 | videoTrack->setPrivate(*videoTrackInfo.track); |
| 1142 | } |
| 1143 | |
| 1144 | ASSERT(segment.textTracks.size() == textTracks().length()); |
| 1145 | for (auto& textTrackInfo : segment.textTracks) { |
| 1146 | if (textTracks().length() == 1) { |
| 1147 | downcast<InbandTextTrack>(*textTracks().item(0)).setPrivate(*textTrackInfo.track); |
| 1148 | break; |
| 1149 | } |
| 1150 | |
| 1151 | auto textTrack = textTracks().getTrackById(textTrackInfo.track->id()); |
| 1152 | ASSERT(textTrack); |
| 1153 | downcast<InbandTextTrack>(*textTrack).setPrivate(*textTrackInfo.track); |
| 1154 | } |
| 1155 | |
| 1156 | // 3.3 Set the need random access point flag on all track buffers to true. |
| 1157 | for (auto& trackBuffer : m_trackBufferMap.values()) |
| 1158 | trackBuffer.needRandomAccessFlag = true; |
| 1159 | } |
| 1160 | |
| 1161 | // 4. Let active track flag equal false. |
| 1162 | bool activeTrackFlag = false; |
| 1163 | |
| 1164 | // 5. If the first initialization segment flag is false, then run the following steps: |
| 1165 | if (!m_receivedFirstInitializationSegment) { |
| 1166 | // 5.1 If the initialization segment contains tracks with codecs the user agent does not support, |
| 1167 | // then run the append error algorithm with the decode error parameter set to true and abort these steps. |
| 1168 | // NOTE: This check is the responsibility of the SourceBufferPrivate. |
| 1169 | |
| 1170 | // 5.2 For each audio track in the initialization segment, run following steps: |
| 1171 | for (auto& audioTrackInfo : segment.audioTracks) { |
| 1172 | // FIXME: Implement steps 5.2.1-5.2.8.1 as per Editor's Draft 09 January 2015, and reorder this |
| 1173 | // 5.2.1 Let new audio track be a new AudioTrack object. |
| 1174 | // 5.2.2 Generate a unique ID and assign it to the id property on new video track. |
            auto newAudioTrack = AudioTrack::create(*this, *audioTrackInfo.track);
            newAudioTrack->setSourceBuffer(this);

            // 5.2.3 If audioTracks.length equals 0, then run the following steps:
            if (!audioTracks().length()) {
                // 5.2.3.1 Set the enabled property on new audio track to true.
                newAudioTrack->setEnabled(true);

                // 5.2.3.2 Set active track flag to true.
                activeTrackFlag = true;
            }

            // 5.2.4 Add new audio track to the audioTracks attribute on this SourceBuffer object.
            // 5.2.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
            // referenced by the audioTracks attribute on this SourceBuffer object.
            audioTracks().append(newAudioTrack.copyRef());

            // 5.2.6 Add new audio track to the audioTracks attribute on the HTMLMediaElement.
            // 5.2.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
            // referenced by the audioTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->ensureAudioTracks().append(newAudioTrack.copyRef());

            // 5.2.8 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(newAudioTrack->id()));
            auto& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value;

            // 5.2.9 Add the track description for this track to the track buffer.
            trackBuffer.description = audioTrackInfo.description;

            m_audioCodecs.append(trackBuffer.description->codec());
        }

        // 5.3 For each video track in the initialization segment, run following steps:
        for (auto& videoTrackInfo : segment.videoTracks) {
            // FIXME: Implement steps 5.3.1-5.3.8.1 as per Editor's Draft 09 January 2015, and reorder this
            // 5.3.1 Let new video track be a new VideoTrack object.
            // 5.3.2 Generate a unique ID and assign it to the id property on new video track.
            auto newVideoTrack = VideoTrack::create(*this, *videoTrackInfo.track);
            newVideoTrack->setSourceBuffer(this);

            // 5.3.3 If videoTracks.length equals 0, then run the following steps:
            if (!videoTracks().length()) {
                // 5.3.3.1 Set the selected property on new video track to true.
                newVideoTrack->setSelected(true);

                // 5.3.3.2 Set active track flag to true.
                activeTrackFlag = true;
            }

            // 5.3.4 Add new video track to the videoTracks attribute on this SourceBuffer object.
            // 5.3.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
            // referenced by the videoTracks attribute on this SourceBuffer object.
            videoTracks().append(newVideoTrack.copyRef());

            // 5.3.6 Add new video track to the videoTracks attribute on the HTMLMediaElement.
            // 5.3.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
            // referenced by the videoTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->ensureVideoTracks().append(newVideoTrack.copyRef());

            // 5.3.8 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(newVideoTrack->id()));
            auto& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value;

            // 5.3.9 Add the track description for this track to the track buffer.
            trackBuffer.description = videoTrackInfo.description;

            m_videoCodecs.append(trackBuffer.description->codec());
        }

        // 5.4 For each text track in the initialization segment, run following steps:
        for (auto& textTrackInfo : segment.textTracks) {
            auto& textTrackPrivate = *textTrackInfo.track;

            // FIXME: Implement steps 5.4.1-5.4.8.1 as per Editor's Draft 09 January 2015, and reorder this
            // 5.4.1 Let new text track be a new TextTrack object with its properties populated with the
            // appropriate information from the initialization segment.
            auto newTextTrack = InbandTextTrack::create(*scriptExecutionContext(), *this, textTrackPrivate);

            // 5.4.2 If the mode property on new text track equals "showing" or "hidden", then set active
            // track flag to true.
            if (textTrackPrivate.mode() != InbandTextTrackPrivate::Disabled)
                activeTrackFlag = true;

            // 5.4.3 Add new text track to the textTracks attribute on this SourceBuffer object.
            // 5.4.4 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at textTracks attribute on this
            // SourceBuffer object.
            textTracks().append(newTextTrack.get());

            // 5.4.5 Add new text track to the textTracks attribute on the HTMLMediaElement.
            // 5.4.6 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the TextTrackList object
            // referenced by the textTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->ensureTextTracks().append(WTFMove(newTextTrack));

            // 5.4.7 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(textTrackPrivate.id()));
            auto& trackBuffer = m_trackBufferMap.add(textTrackPrivate.id(), TrackBuffer()).iterator->value;

            // 5.4.8 Add the track description for this track to the track buffer.
            trackBuffer.description = textTrackInfo.description;

            m_textCodecs.append(trackBuffer.description->codec());
        }

        // 5.5 If active track flag equals true, then run the following steps:
        if (activeTrackFlag) {
            // 5.5.1 Add this SourceBuffer to activeSourceBuffers.
            // 5.5.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
            setActive(true);
        }

        // 5.6 Set first initialization segment flag to true.
        m_receivedFirstInitializationSegment = true;
    }

    // (Note: Issue #155 adds this step after step 5:)
    // 6. Set pending initialization segment for changeType flag to false.
    m_pendingInitializationSegmentForChangeType = false;

    // 6. If the HTMLMediaElement.readyState attribute is HAVE_NOTHING, then run the following steps:
    if (m_private->readyState() == MediaPlayer::HaveNothing) {
        // 6.1 If one or more objects in sourceBuffers have first initialization segment flag set to false, then abort these steps.
        for (auto& sourceBuffer : *m_source->sourceBuffers()) {
            if (!sourceBuffer->m_receivedFirstInitializationSegment)
                return;
        }

        // 6.2 Set the HTMLMediaElement.readyState attribute to HAVE_METADATA.
        // 6.3 Queue a task to fire a simple event named loadedmetadata at the media element.
        m_private->setReadyState(MediaPlayer::HaveMetadata);
    }

    // 7. If the active track flag equals true and the HTMLMediaElement.readyState
    // attribute is greater than HAVE_CURRENT_DATA, then set the HTMLMediaElement.readyState
    // attribute to HAVE_METADATA.
    if (activeTrackFlag && m_private->readyState() > MediaPlayer::HaveCurrentData)
        m_private->setReadyState(MediaPlayer::HaveMetadata);
}

bool SourceBuffer::validateInitializationSegment(const InitializationSegment& segment)
{
    // FIXME: ordering of all 3.5.X (X>=7) functions needs to be updated to post-[24 July 2014 Editor's Draft] version
    // 3.5.8 Initialization Segment Received (ctd)
    // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015]

    // Note: those are checks from step 3.1
    // * The number of audio, video, and text tracks match what was in the first initialization segment.
    if (segment.audioTracks.size() != audioTracks().length()
        || segment.videoTracks.size() != videoTracks().length()
        || segment.textTracks.size() != textTracks().length())
        return false;

    // * The codecs for each track, match what was specified in the first initialization segment.
    // (Note: Issue #155 strikes out this check. For broad compatibility when this experimental feature
    // is not enabled, only perform this check if the "pending initialization segment for changeType flag"
    // is not set.)
    for (auto& audioTrackInfo : segment.audioTracks) {
        if (m_audioCodecs.contains(audioTrackInfo.description->codec()))
            continue;

        if (!m_pendingInitializationSegmentForChangeType)
            return false;

        m_audioCodecs.append(audioTrackInfo.description->codec());
    }

    for (auto& videoTrackInfo : segment.videoTracks) {
        if (m_videoCodecs.contains(videoTrackInfo.description->codec()))
            continue;

        if (!m_pendingInitializationSegmentForChangeType)
            return false;

        m_videoCodecs.append(videoTrackInfo.description->codec());
    }

    for (auto& textTrackInfo : segment.textTracks) {
        if (m_textCodecs.contains(textTrackInfo.description->codec()))
            continue;

        if (!m_pendingInitializationSegmentForChangeType)
            return false;

        m_textCodecs.append(textTrackInfo.description->codec());
    }

    // * If more than one track for a single type are present (e.g., 2 audio tracks), then the Track
    // IDs match the ones in the first initialization segment.
    if (segment.audioTracks.size() >= 2) {
        for (auto& audioTrackInfo : segment.audioTracks) {
            if (!m_trackBufferMap.contains(audioTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.videoTracks.size() >= 2) {
        for (auto& videoTrackInfo : segment.videoTracks) {
            if (!m_trackBufferMap.contains(videoTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.textTracks.size() >= 2) {
        for (auto& textTrackInfo : segment.textTracks) {
            if (!m_trackBufferMap.contains(textTrackInfo.track->id()))
                return false;
        }
    }

    return true;
}

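// Comparator used for ordered searches over (presentation time, sample) pairs. The three
// overloads let a bare MediaTime be compared against pairs from either side, so ordered
// searches (e.g. std::lower_bound) over a sample map can be keyed by time alone.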
class SampleLessThanComparator {
public:
    bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
    {
        return value1.first < value2.first;
    }

    bool operator()(MediaTime value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
    {
        return value1 < value2.first;
    }

    bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, MediaTime value2)
    {
        return value1.first < value2;
    }
};

void SourceBuffer::appendError(bool decodeErrorParam)
{
    // 3.5.3 Append Error Algorithm
    // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-append-error [Editor's Draft 09 January 2015]

    ASSERT(m_updating);
    // 1. Run the reset parser state algorithm.
    resetParserState();

    // 2. Set the updating attribute to false.
    m_updating = false;

    // 3. Queue a task to fire a simple event named error at this SourceBuffer object.
    scheduleEvent(eventNames().errorEvent);

    // 4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);

    // 5. If decode error is true, then run the end of stream algorithm with the error parameter set to "decode".
    if (decodeErrorParam)
        m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
}

void SourceBuffer::sourceBufferPrivateDidReceiveSample(MediaSample& sample)
{
    if (isRemoved())
        return;

    // 3.5.1 Segment Parser Loop
    // 6.1 If the first initialization segment received flag is false, (Note: Issue #155 & changeType()
    // algorithm) or the pending initialization segment for changeType flag is true, (End note)
    // then run the append error algorithm with the decode error parameter set to true and abort this algorithm.
    // Note: The current design makes SourceBuffer somewhat ignorant of append state; that is more a concern
    // of SourceBufferPrivate. That's why this check can't really be done in appendInternal,
    // unless we force some kind of design with state-machine switching.
    if (!m_receivedFirstInitializationSegment || m_pendingInitializationSegmentForChangeType) {
        appendError(true);
        return;
    }

    // 3.5.8 Coded Frame Processing
    // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-processing

    // When complete coded frames have been parsed by the segment parser loop then the following steps
    // are run:
    // 1. For each coded frame in the media segment run the following steps:
    // 1.1. Loop Top
    do {
        MediaTime presentationTimestamp;
        MediaTime decodeTimestamp;

        // NOTE: this is out-of-order, but we need the timescale from the
        // sample's duration for timestamp generation.
        // 1.2 Let frame duration be a double precision floating point representation of the coded frame's
        // duration in seconds.
        MediaTime frameDuration = sample.duration();

        if (m_shouldGenerateTimestamps) {
            // ↳ If generate timestamps flag equals true:
            // 1. Let presentation timestamp equal 0.
            // NOTE: Use the duration timescale for the presentation timestamp, as this will eliminate
            // timescale rounding when generating timestamps.
            presentationTimestamp = { 0, frameDuration.timeScale() };

            // 2. Let decode timestamp equal 0.
            decodeTimestamp = { 0, frameDuration.timeScale() };
        } else {
            // ↳ Otherwise:
            // 1. Let presentation timestamp be a double precision floating point representation of
            // the coded frame's presentation timestamp in seconds.
            presentationTimestamp = sample.presentationTime();

            // 2. Let decode timestamp be a double precision floating point representation of the coded frame's
            // decode timestamp in seconds.
            decodeTimestamp = sample.decodeTime();
        }
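        // NOTE: With the generate timestamps flag set (as it is, e.g., for byte streams without
        // inherent timing), every frame starts from a zero timestamp here and receives its real
        // position via timestampOffset, which step 1.22 below advances to each frame's end timestamp.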

        // 1.3 If mode equals "sequence" and group start timestamp is set, then run the following steps:
        if (m_mode == AppendMode::Sequence && m_groupStartTimestamp.isValid()) {
            // 1.3.1 Set timestampOffset equal to group start timestamp - presentation timestamp.
            m_timestampOffset = m_groupStartTimestamp;

            for (auto& trackBuffer : m_trackBufferMap.values()) {
                trackBuffer.lastFrameTimescale = 0;
                trackBuffer.roundedTimestampOffset = MediaTime::invalidTime();
            }

            // 1.3.2 Set group end timestamp equal to group start timestamp.
            m_groupEndTimestamp = m_groupStartTimestamp;

            // 1.3.3 Set the need random access point flag on all track buffers to true.
            for (auto& trackBuffer : m_trackBufferMap.values())
                trackBuffer.needRandomAccessFlag = true;

            // 1.3.4 Unset group start timestamp.
            m_groupStartTimestamp = MediaTime::invalidTime();
        }

        // NOTE: this is out-of-order, but we need TrackBuffer to be able to cache the results of timestamp offset rounding
        // 1.5 Let track buffer equal the track buffer that the coded frame will be added to.
        AtomicString trackID = sample.trackID();
        auto it = m_trackBufferMap.find(trackID);
        if (it == m_trackBufferMap.end()) {
            // The client managed to append a sample with a trackID not present in the initialization
            // segment. This would be a good place to post a message to the developer console.
            didDropSample();
            return;
        }
        TrackBuffer& trackBuffer = it->value;

        MediaTime microsecond(1, 1000000);

        auto roundTowardsTimeScaleWithRoundingMargin = [] (const MediaTime& time, uint32_t timeScale, const MediaTime& roundingMargin) {
            while (true) {
                MediaTime roundedTime = time.toTimeScale(timeScale);
                if (abs(roundedTime - time) < roundingMargin || timeScale >= MediaTime::MaximumTimeScale)
                    return roundedTime;

                if (!WTF::safeMultiply(timeScale, 2, timeScale) || timeScale > MediaTime::MaximumTimeScale)
                    timeScale = MediaTime::MaximumTimeScale;
            }
        };
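        // Illustrative example: rounding an offset of 1001/3000s into timescale 1000 yields
        // 334/1000s, an error of roughly 333 microseconds; each doubling of the timescale roughly
        // halves that error, so the loop stops at the first timescale within the margin (one
        // microsecond here), or clamps to MaximumTimeScale if the margin is never reached.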

        // 1.4 If timestampOffset is not 0, then run the following steps:
        if (m_timestampOffset) {
            if (!trackBuffer.roundedTimestampOffset.isValid() || presentationTimestamp.timeScale() != trackBuffer.lastFrameTimescale) {
                trackBuffer.lastFrameTimescale = presentationTimestamp.timeScale();
                trackBuffer.roundedTimestampOffset = roundTowardsTimeScaleWithRoundingMargin(m_timestampOffset, trackBuffer.lastFrameTimescale, microsecond);
            }

            // 1.4.1 Add timestampOffset to the presentation timestamp.
            presentationTimestamp += trackBuffer.roundedTimestampOffset;

            // 1.4.2 Add timestampOffset to the decode timestamp.
            decodeTimestamp += trackBuffer.roundedTimestampOffset;
        }

        // 1.6 ↳ If last decode timestamp for track buffer is set and decode timestamp is less than last
        // decode timestamp:
        // OR
        // ↳ If last decode timestamp for track buffer is set and the difference between decode timestamp and
        // last decode timestamp is greater than 2 times last frame duration:
        MediaTime decodeDurationToCheck = trackBuffer.greatestDecodeDuration;

        if (decodeDurationToCheck.isValid() && trackBuffer.lastFrameDuration.isValid()
            && (trackBuffer.lastFrameDuration > decodeDurationToCheck))
            decodeDurationToCheck = trackBuffer.lastFrameDuration;

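        // For example, with roughly 33ms frames, a decode timestamp that moves backwards, or that
        // lands more than two frame durations (roughly 66ms) past the previous one, is treated as a
        // discontinuity by the check below.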
        if (trackBuffer.lastDecodeTimestamp.isValid() && (decodeTimestamp < trackBuffer.lastDecodeTimestamp
            || (decodeDurationToCheck.isValid() && abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (decodeDurationToCheck * 2)))) {

            // 1.6.1:
            if (m_mode == AppendMode::Segments) {
                // ↳ If mode equals "segments":
                // Set group end timestamp to presentation timestamp.
                m_groupEndTimestamp = presentationTimestamp;
            } else {
                // ↳ If mode equals "sequence":
                // Set group start timestamp equal to the group end timestamp.
                m_groupStartTimestamp = m_groupEndTimestamp;
            }

            for (auto& trackBuffer : m_trackBufferMap.values()) {
                // 1.6.2 Unset the last decode timestamp on all track buffers.
                trackBuffer.lastDecodeTimestamp = MediaTime::invalidTime();
                // 1.6.3 Unset the last frame duration on all track buffers.
                trackBuffer.greatestDecodeDuration = MediaTime::invalidTime();
                trackBuffer.lastFrameDuration = MediaTime::invalidTime();
                // 1.6.4 Unset the highest presentation timestamp on all track buffers.
                trackBuffer.highestPresentationTimestamp = MediaTime::invalidTime();
                // 1.6.5 Set the need random access point flag on all track buffers to true.
                trackBuffer.needRandomAccessFlag = true;
            }

            // 1.6.6 Jump to the Loop Top step above to restart processing of the current coded frame.
            continue;
        }

        if (m_mode == AppendMode::Sequence) {
            // Use the generated timestamps instead of the sample's timestamps.
            sample.setTimestamps(presentationTimestamp, decodeTimestamp);
        } else if (trackBuffer.roundedTimestampOffset) {
            // Reflect the timestamp offset into the sample.
            sample.offsetTimestampsBy(trackBuffer.roundedTimestampOffset);
        }

        DEBUG_LOG(LOGIDENTIFIER, sample);

        // 1.7 Let frame end timestamp equal the sum of presentation timestamp and frame duration.
        MediaTime frameEndTimestamp = presentationTimestamp + frameDuration;

        // 1.8 If presentation timestamp is less than appendWindowStart, then set the need random access
        // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
        // the next coded frame.
        // 1.9 If frame end timestamp is greater than appendWindowEnd, then set the need random access
        // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
        // the next coded frame.
        if (presentationTimestamp < m_appendWindowStart || frameEndTimestamp > m_appendWindowEnd) {
            trackBuffer.needRandomAccessFlag = true;
            didDropSample();
            return;
        }

        // 1.10 If the decode timestamp is less than the presentation start time, then run the end of stream
        // algorithm with the error parameter set to "decode", and abort these steps.
        // NOTE: Until <https://www.w3.org/Bugs/Public/show_bug.cgi?id=27487> is resolved, we will only check
        // the presentation timestamp.
        MediaTime presentationStartTime = MediaTime::zeroTime();
        if (presentationTimestamp < presentationStartTime) {
            ERROR_LOG(LOGIDENTIFIER, "failing because presentationTimestamp (", presentationTimestamp, ") < presentationStartTime (", presentationStartTime, ")");
            m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
            return;
        }

        // 1.11 If the need random access point flag on track buffer equals true, then run the following steps:
        if (trackBuffer.needRandomAccessFlag) {
            // 1.11.1 If the coded frame is not a random access point, then drop the coded frame and jump
            // to the top of the loop to start processing the next coded frame.
            if (!sample.isSync()) {
                didDropSample();
                return;
            }

            // 1.11.2 Set the need random access point flag on track buffer to false.
            trackBuffer.needRandomAccessFlag = false;
        }

        // 1.12 Let spliced audio frame be an unset variable for holding audio splice information
        // 1.13 Let spliced timed text frame be an unset variable for holding timed text splice information
        // FIXME: Add support for sample splicing.

        SampleMap erasedSamples;

        // 1.14 If last decode timestamp for track buffer is unset and presentation timestamp
        // falls within the presentation interval of a coded frame in track buffer, then run the
        // following steps:
        if (trackBuffer.lastDecodeTimestamp.isInvalid()) {
            auto iter = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(presentationTimestamp);
            if (iter != trackBuffer.samples.presentationOrder().end()) {
                // 1.14.1 Let overlapped frame be the coded frame in track buffer that matches the condition above.
                RefPtr<MediaSample> overlappedFrame = iter->second;

                // 1.14.2 If track buffer contains audio coded frames:
                // Run the audio splice frame algorithm and if a splice frame is returned, assign it to
                // spliced audio frame.
                // FIXME: Add support for sample splicing.

                // If track buffer contains video coded frames:
                if (trackBuffer.description && trackBuffer.description->isVideo()) {
                    // 1.14.2.1 Let overlapped frame presentation timestamp equal the presentation timestamp
                    // of overlapped frame.
                    MediaTime overlappedFramePresentationTimestamp = overlappedFrame->presentationTime();

                    // 1.14.2.2 Let remove window timestamp equal overlapped frame presentation timestamp
                    // plus 1 microsecond.
                    MediaTime removeWindowTimestamp = overlappedFramePresentationTimestamp + microsecond;

                    // 1.14.2.3 If the presentation timestamp is less than the remove window timestamp,
                    // then remove overlapped frame and any coded frames that depend on it from track buffer.
                    if (presentationTimestamp < removeWindowTimestamp)
                        erasedSamples.addSample(*iter->second);
                }

                // If track buffer contains timed text coded frames:
                // Run the text splice frame algorithm and if a splice frame is returned, assign it to spliced timed text frame.
                // FIXME: Add support for sample splicing.
            }
        }

        // 1.15 Remove existing coded frames in track buffer:
        // If highest presentation timestamp for track buffer is not set:
        if (trackBuffer.highestPresentationTimestamp.isInvalid()) {
            // Remove all coded frames from track buffer that have a presentation timestamp greater than or
            // equal to presentation timestamp and less than frame end timestamp.
            auto iter_pair = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp);
            if (iter_pair.first != trackBuffer.samples.presentationOrder().end())
                erasedSamples.addRange(iter_pair.first, iter_pair.second);
        }

        // There are many files out there where the frame times are not perfectly contiguous and may have small overlaps
        // between the beginning of a frame and the end of the previous one; therefore a tolerance is needed whenever
        // durations are considered.
        // For instance, most WebM files are muxed rounded to the millisecond (the default TimecodeScale of the format)
        // but their durations use a finer timescale (causing a sub-millisecond overlap). More rarely, there are also
        // MP4 files with slightly off tfdt boxes, presenting a similar problem at the beginning of each fragment.
        const MediaTime contiguousFrameTolerance = MediaTime(1, 1000);

        // If highest presentation timestamp for track buffer is set and less than or equal to presentation timestamp
        if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp - contiguousFrameTolerance <= presentationTimestamp) {
            // Remove all coded frames from track buffer that have a presentation timestamp greater than highest
            // presentation timestamp and less than or equal to frame end timestamp.
            do {
                // NOTE: Searching from the end of the trackBuffer will be vastly more efficient if the search range is
                // near the end of the buffered range. Use a linear-backwards search if the search range is within one
                // frame duration of the end:
                unsigned bufferedLength = trackBuffer.buffered.length();
                if (!bufferedLength)
                    break;

                MediaTime highestBufferedTime = trackBuffer.buffered.maximumBufferedTime();
                MediaTime eraseBeginTime = trackBuffer.highestPresentationTimestamp - contiguousFrameTolerance;
                MediaTime eraseEndTime = frameEndTimestamp - contiguousFrameTolerance;

                PresentationOrderSampleMap::iterator_range range;
                if (highestBufferedTime - trackBuffer.highestPresentationTimestamp < trackBuffer.lastFrameDuration)
                    // If the new frame is at the end of the buffered ranges, perform a sequential scan from end (O(1)).
                    range = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimesFromEnd(eraseBeginTime, eraseEndTime);
                else
                    // In any other case, perform a binary search (O(log(n))).
                    range = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(eraseBeginTime, eraseEndTime);

                if (range.first != trackBuffer.samples.presentationOrder().end())
                    erasedSamples.addRange(range.first, range.second);
            } while (false);
        }

        // 1.16 Remove decoding dependencies of the coded frames removed in the previous step:
        DecodeOrderSampleMap::MapType dependentSamples;
        if (!erasedSamples.empty()) {
            // If detailed information about decoding dependencies is available:
            // FIXME: Add support for detailed dependency information

            // Otherwise: Remove all coded frames between the coded frames removed in the previous step
            // and the next random access point after those removed frames.
            auto firstDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().begin()->first);
            auto lastDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().rbegin()->first);
            auto nextSyncIter = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(lastDecodeIter);
            dependentSamples.insert(firstDecodeIter, nextSyncIter);

            // NOTE: in the case of b-frames, the previous step may leave in place samples whose presentation
            // timestamp < presentationTime, but whose decode timestamp >= decodeTime. These will eventually cause
            // a decode error if left in place, so remove these samples as well.
            DecodeOrderSampleMap::KeyType decodeKey(sample.decodeTime(), sample.presentationTime());
            auto samplesWithHigherDecodeTimes = trackBuffer.samples.decodeOrder().findSamplesBetweenDecodeKeys(decodeKey, erasedSamples.decodeOrder().begin()->first);
            if (samplesWithHigherDecodeTimes.first != samplesWithHigherDecodeTimes.second)
                dependentSamples.insert(samplesWithHigherDecodeTimes.first, samplesWithHigherDecodeTimes.second);

            PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(dependentSamples, trackBuffer, this, "sourceBufferPrivateDidReceiveSample");

            // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
            // not yet displayed samples.
            MediaTime currentMediaTime = m_source->currentTime();
            if (trackBuffer.lastEnqueuedPresentationTime.isValid() && currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
                PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
                possiblyEnqueuedRanges.intersectWith(erasedRanges);
                if (possiblyEnqueuedRanges.length())
                    trackBuffer.needsReenqueueing = true;
            }

            erasedRanges.invert();
            trackBuffer.buffered.intersectWith(erasedRanges);
            setBufferedDirty(true);
        }

        // 1.17 If spliced audio frame is set:
        // Add spliced audio frame to the track buffer.
        // If spliced timed text frame is set:
        // Add spliced timed text frame to the track buffer.
        // FIXME: Add support for sample splicing.

        // Otherwise:
        // Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
        trackBuffer.samples.addSample(sample);

        // Note: The terminology here is confusing: "enqueuing" means providing a frame to the inner media framework.
        // First, frames are inserted in the decode queue; later, at the end of the append all the frames in the decode
        // queue are "enqueued" (sent to the inner media framework) in `provideMediaData()`.
        //
        // In order to check whether a frame should be added to the decode queue we check whether it starts after the
        // lastEnqueuedDecodeKey.
        DecodeOrderSampleMap::KeyType decodeKey(sample.decodeTime(), sample.presentationTime());
        if (trackBuffer.lastEnqueuedDecodeKey.first.isInvalid() || decodeKey > trackBuffer.lastEnqueuedDecodeKey) {
            trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, &sample));
        }

        // NOTE: the spec considers "Coded Frame Duration" to be the presentation duration, but this is not necessarily equal
        // to the decoded duration. When comparing deltas between decode timestamps, use the decode duration, not the presentation duration.
        if (trackBuffer.lastDecodeTimestamp.isValid()) {
            MediaTime lastDecodeDuration = decodeTimestamp - trackBuffer.lastDecodeTimestamp;
            if (!trackBuffer.greatestDecodeDuration.isValid() || lastDecodeDuration > trackBuffer.greatestDecodeDuration)
                trackBuffer.greatestDecodeDuration = lastDecodeDuration;
        }

        // 1.18 Set last decode timestamp for track buffer to decode timestamp.
        trackBuffer.lastDecodeTimestamp = decodeTimestamp;

        // 1.19 Set last frame duration for track buffer to frame duration.
        trackBuffer.lastFrameDuration = frameDuration;

        // 1.20 If highest presentation timestamp for track buffer is unset or frame end timestamp is greater
        // than highest presentation timestamp, then set highest presentation timestamp for track buffer
        // to frame end timestamp.
        if (trackBuffer.highestPresentationTimestamp.isInvalid() || frameEndTimestamp > trackBuffer.highestPresentationTimestamp)
            trackBuffer.highestPresentationTimestamp = frameEndTimestamp;

        // 1.21 If frame end timestamp is greater than group end timestamp, then set group end timestamp equal
        // to frame end timestamp.
        if (m_groupEndTimestamp.isInvalid() || frameEndTimestamp > m_groupEndTimestamp)
            m_groupEndTimestamp = frameEndTimestamp;

        // 1.22 If generate timestamps flag equals true, then set timestampOffset equal to frame end timestamp.
        if (m_shouldGenerateTimestamps) {
            m_timestampOffset = frameEndTimestamp;
            for (auto& trackBuffer : m_trackBufferMap.values()) {
                trackBuffer.lastFrameTimescale = 0;
                trackBuffer.roundedTimestampOffset = MediaTime::invalidTime();
            }
        }

        // Eliminate small gaps between buffered ranges by coalescing
        // disjoint ranges separated by less than a "fudge factor".
        auto presentationEndTime = presentationTimestamp + frameDuration;
        auto nearestToPresentationStartTime = trackBuffer.buffered.nearest(presentationTimestamp);
        if (nearestToPresentationStartTime.isValid() && (presentationTimestamp - nearestToPresentationStartTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor()))
            presentationTimestamp = nearestToPresentationStartTime;

        auto nearestToPresentationEndTime = trackBuffer.buffered.nearest(presentationEndTime);
        if (nearestToPresentationEndTime.isValid() && (nearestToPresentationEndTime - presentationEndTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor()))
            presentationEndTime = nearestToPresentationEndTime;

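        // For example, if the previous frame's buffered range ends at 1.000s and this frame starts
        // at 1.0005s, the start snaps back to 1.000s (the gap being within the fudge factor) and the
        // two ranges merge into one when added below.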
        trackBuffer.buffered.add(presentationTimestamp, presentationEndTime);
        m_bufferedSinceLastMonitor += frameDuration.toDouble();
        setBufferedDirty(true);

        break;
    } while (1);

    // Steps 2-4 will be handled by MediaSource::monitorSourceBuffers()

    // 5. If the media segment contains data beyond the current duration, then run the duration change algorithm with new
    // duration set to the maximum of the current duration and the group end timestamp.
    if (m_groupEndTimestamp > m_source->duration())
        m_source->setDurationInternal(m_groupEndTimestamp);
}

bool SourceBuffer::hasAudio() const
{
    return m_audioTracks && m_audioTracks->length();
}

bool SourceBuffer::hasVideo() const
{
    return m_videoTracks && m_videoTracks->length();
}

bool SourceBuffer::sourceBufferPrivateHasAudio() const
{
    return hasAudio();
}

bool SourceBuffer::sourceBufferPrivateHasVideo() const
{
    return hasVideo();
}

void SourceBuffer::videoTrackSelectedChanged(VideoTrack& track)
{
    // 2.4.5 Changes to selected/enabled track state
    // If the selected video track changes, then run the following steps:
    // 1. If the SourceBuffer associated with the previously selected video track is not associated with
    // any other enabled tracks, run the following steps:
    if (!track.selected()
        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
        // 1.1 Remove the SourceBuffer from activeSourceBuffers.
        // 1.2 Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
        setActive(false);
    } else if (track.selected()) {
        // 2. If the SourceBuffer associated with the newly selected video track is not already in activeSourceBuffers,
        // run the following steps:
        // 2.1 Add the SourceBuffer to activeSourceBuffers.
        // 2.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
        setActive(true);
    }

    if (m_videoTracks && m_videoTracks->contains(track))
        m_videoTracks->scheduleChangeEvent();

    if (!isRemoved())
        m_source->mediaElement()->videoTrackSelectedChanged(track);
}

void SourceBuffer::audioTrackEnabledChanged(AudioTrack& track)
{
    // 2.4.5 Changes to selected/enabled track state
    // If an audio track becomes disabled and the SourceBuffer associated with this track is not
    // associated with any other enabled or selected track, then run the following steps:
    if (!track.enabled()
        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
        // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
        // 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
        setActive(false);
    } else if (track.enabled()) {
        // If an audio track becomes enabled and the SourceBuffer associated with this track is
        // not already in activeSourceBuffers, then run the following steps:
        // 1. Add the SourceBuffer associated with the audio track to activeSourceBuffers
        // 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
        setActive(true);
    }

    if (m_audioTracks && m_audioTracks->contains(track))
        m_audioTracks->scheduleChangeEvent();

    if (!isRemoved())
        m_source->mediaElement()->audioTrackEnabledChanged(track);
}

void SourceBuffer::textTrackModeChanged(TextTrack& track)
{
    // 2.4.5 Changes to selected/enabled track state
    // If a text track mode becomes "disabled" and the SourceBuffer associated with this track is not
    // associated with any other enabled or selected track, then run the following steps:
    if (track.mode() == TextTrack::Mode::Disabled
        && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
        && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
        && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
        // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
        // 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
        setActive(false);
    } else {
        // If a text track mode becomes "showing" or "hidden" and the SourceBuffer associated with this
        // track is not already in activeSourceBuffers, then run the following steps:
        // 1. Add the SourceBuffer associated with the text track to activeSourceBuffers
        // 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
        setActive(true);
    }

    if (m_textTracks && m_textTracks->contains(track))
        m_textTracks->scheduleChangeEvent();

    if (!isRemoved())
        m_source->mediaElement()->textTrackModeChanged(track);
}

void SourceBuffer::textTrackAddCue(TextTrack& track, TextTrackCue& cue)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackAddCue(track, cue);
}

void SourceBuffer::textTrackAddCues(TextTrack& track, const TextTrackCueList& cueList)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackAddCues(track, cueList);
}

void SourceBuffer::textTrackRemoveCue(TextTrack& track, TextTrackCue& cue)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackRemoveCue(track, cue);
}

void SourceBuffer::textTrackRemoveCues(TextTrack& track, const TextTrackCueList& cueList)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackRemoveCues(track, cueList);
}

void SourceBuffer::textTrackKindChanged(TextTrack& track)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackKindChanged(track);
}

void SourceBuffer::sourceBufferPrivateReenqueSamples(const AtomicString& trackID)
{
    if (isRemoved())
        return;

    DEBUG_LOG(LOGIDENTIFIER);
    auto it = m_trackBufferMap.find(trackID);
    if (it == m_trackBufferMap.end())
        return;

    auto& trackBuffer = it->value;
    trackBuffer.needsReenqueueing = true;
    reenqueueMediaForTime(trackBuffer, trackID, m_source->currentTime());
}

void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(const AtomicString& trackID)
{
    if (isRemoved())
        return;

    DEBUG_LOG(LOGIDENTIFIER);
    auto it = m_trackBufferMap.find(trackID);
    if (it == m_trackBufferMap.end())
        return;

    auto& trackBuffer = it->value;
    if (!trackBuffer.needsReenqueueing && !m_source->isSeeking())
        provideMediaData(trackBuffer, trackID);
}

void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, const AtomicString& trackID)
{
    if (m_source->isSeeking())
        return;

#if !RELEASE_LOG_DISABLED
    unsigned enqueuedSamples = 0;
#endif

    while (!trackBuffer.decodeQueue.empty()) {
        if (!m_private->isReadyForMoreSamples(trackID)) {
            m_private->notifyClientWhenReadyForMoreSamples(trackID);
            break;
        }

        // FIXME(rdar://problem/20635969): Remove this re-entrancy protection when the aforementioned radar is resolved; protecting
        // against re-entrancy introduces a small inefficiency when removing appended samples from the decode queue one at a time
        // rather than when all samples have been enqueued.
        auto sample = trackBuffer.decodeQueue.begin()->second;

        // Do not enqueue samples spanning a significant unbuffered gap.
        // NOTE: one second is somewhat arbitrary. MediaSource::monitorSourceBuffers() is run
        // on the playbackTimer, which is effectively every 350ms. Allowing > 350ms gap between
        // enqueued samples allows for situations where we overrun the end of a buffered range
        // but don't notice for 350ms of playback time, and the client can enqueue data for the
        // new current time without triggering this early return.
        // FIXME(135867): Make this gap detection logic less arbitrary.
        MediaTime oneSecond(1, 1);
        if (trackBuffer.lastEnqueuedDecodeKey.first.isValid()
            && trackBuffer.lastEnqueuedDecodeDuration.isValid()
            && sample->decodeTime() - trackBuffer.lastEnqueuedDecodeKey.first > oneSecond + trackBuffer.lastEnqueuedDecodeDuration)
            break;

        // Remove the sample from the decode queue now.
        trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin());

        trackBuffer.lastEnqueuedPresentationTime = sample->presentationTime();
        trackBuffer.lastEnqueuedDecodeKey = {sample->decodeTime(), sample->presentationTime()};
        trackBuffer.lastEnqueuedDecodeDuration = sample->duration();
        m_private->enqueueSample(sample.releaseNonNull(), trackID);
#if !RELEASE_LOG_DISABLED
        ++enqueuedSamples;
#endif
    }

#if !RELEASE_LOG_DISABLED
    DEBUG_LOG(LOGIDENTIFIER, "enqueued ", enqueuedSamples, " samples, ", static_cast<size_t>(trackBuffer.decodeQueue.size()), " remaining");
#endif

    trySignalAllSamplesInTrackEnqueued(trackID);
}

void SourceBuffer::trySignalAllSamplesInTrackEnqueued(const AtomicString& trackID)
{
    if (m_source->isEnded() && m_trackBufferMap.get(trackID).decodeQueue.empty()) {
        DEBUG_LOG(LOGIDENTIFIER, "enqueued all samples from track ", trackID);
        m_private->allSamplesInTrackEnqueued(trackID);
    }
}

void SourceBuffer::trySignalAllSamplesEnqueued()
{
    for (const AtomicString& trackID : m_trackBufferMap.keys())
        trySignalAllSamplesInTrackEnqueued(trackID);
}

void SourceBuffer::reenqueueMediaForTime(TrackBuffer& trackBuffer, const AtomicString& trackID, const MediaTime& time)
{
    m_private->flush(trackID);
    trackBuffer.decodeQueue.clear();
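    // Flushing drops any frames already handed to the platform decoder, and clearing the decode
    // queue discards frames staged but not yet enqueued; both would be stale at the new time.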

    // Find the sample which contains the current presentation time.
    auto currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);

    if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end())
        currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleStartingOnOrAfterPresentationTime(time);

    if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end()
        || (currentSamplePTSIterator->first - time) > MediaSource::currentTimeFudgeFactor())
        return;

    // Search backward for the previous sync sample.
    DecodeOrderSampleMap::KeyType decodeKey(currentSamplePTSIterator->second->decodeTime(), currentSamplePTSIterator->second->presentationTime());
    auto currentSampleDTSIterator = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
    ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeOrder().end());

    auto reverseCurrentSampleIter = --DecodeOrderSampleMap::reverse_iterator(currentSampleDTSIterator);
    auto reverseLastSyncSampleIter = trackBuffer.samples.decodeOrder().findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
    if (reverseLastSyncSampleIter == trackBuffer.samples.decodeOrder().rend())
        return;

    // Fill the decode queue with the non-displaying samples.
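    // These copies decode but never render: the decoder needs every frame from the preceding sync
    // sample up to the current one in order to reconstruct it, yet none of them should appear on
    // screen, since they precede the target time.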
    for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter) {
        auto copy = iter->second->createNonDisplayingCopy();
        DecodeOrderSampleMap::KeyType decodeKey(copy->decodeTime(), copy->presentationTime());
        trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, WTFMove(copy)));
    }

    if (!trackBuffer.decodeQueue.empty()) {
        auto lastSampleIter = trackBuffer.decodeQueue.rbegin();
        auto lastSampleDecodeKey = lastSampleIter->first;
        auto lastSampleDuration = lastSampleIter->second->duration();
        trackBuffer.lastEnqueuedPresentationTime = lastSampleDecodeKey.second;
        trackBuffer.lastEnqueuedDecodeKey = lastSampleDecodeKey;
        trackBuffer.lastEnqueuedDecodeDuration = lastSampleDuration;
    } else {
        trackBuffer.lastEnqueuedPresentationTime = MediaTime::invalidTime();
        trackBuffer.lastEnqueuedDecodeKey = {MediaTime::invalidTime(), MediaTime::invalidTime()};
        trackBuffer.lastEnqueuedDecodeDuration = MediaTime::invalidTime();
    }

    // Fill the decode queue with the remaining samples.
    for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeOrder().end(); ++iter)
        trackBuffer.decodeQueue.insert(*iter);
    provideMediaData(trackBuffer, trackID);

    trackBuffer.needsReenqueueing = false;
}

void SourceBuffer::didDropSample()
{
    if (!isRemoved())
        m_source->mediaElement()->incrementDroppedFrameCount();
}

void SourceBuffer::monitorBufferingRate()
{
    MonotonicTime now = MonotonicTime::now();
    Seconds interval = now - m_timeOfBufferingMonitor;
    double rateSinceLastMonitor = m_bufferedSinceLastMonitor / interval.seconds();

    m_timeOfBufferingMonitor = now;
    m_bufferedSinceLastMonitor = 0;

    m_averageBufferRate += (interval.seconds() * ExponentialMovingAverageCoefficient) * (rateSinceLastMonitor - m_averageBufferRate);
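    // With a coefficient of 0.1, a one-second interval moves the average 10% of the way toward the
    // instantaneous rate; longer intervals weight the new measurement proportionally more.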

    DEBUG_LOG(LOGIDENTIFIER, m_averageBufferRate);
}

void SourceBuffer::updateBufferedFromTrackBuffers()
{
    // 3.1 Attributes, buffered
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-buffered

    // 2. Let highest end time be the largest track buffer ranges end time across all the track buffers managed by this SourceBuffer object.
    MediaTime highestEndTime = MediaTime::negativeInfiniteTime();
    for (auto& trackBuffer : m_trackBufferMap.values()) {
        if (!trackBuffer.buffered.length())
            continue;
        highestEndTime = std::max(highestEndTime, trackBuffer.buffered.maximumBufferedTime());
    }

    // NOTE: Short circuit the following if none of the TrackBuffers have buffered ranges to avoid generating
    // a single range of {0, 0}.
    if (highestEndTime.isNegativeInfinite()) {
        m_buffered->ranges() = PlatformTimeRanges();
        return;
    }

    // 3. Let intersection ranges equal a TimeRange object containing a single range from 0 to highest end time.
    PlatformTimeRanges intersectionRanges { MediaTime::zeroTime(), highestEndTime };

    // 4. For each audio and video track buffer managed by this SourceBuffer, run the following steps:
    for (auto& trackBuffer : m_trackBufferMap.values()) {
        // 4.1 Let track ranges equal the track buffer ranges for the current track buffer.
        PlatformTimeRanges trackRanges = trackBuffer.buffered;
        if (!trackRanges.length())
            continue;

        // 4.2 If readyState is "ended", then set the end time on the last range in track ranges to highest end time.
        if (m_source->isEnded())
            trackRanges.add(trackRanges.maximumBufferedTime(), highestEndTime);

        // 4.3 Let new intersection ranges equal the intersection between the intersection ranges and the track ranges.
        // 4.4 Replace the ranges in intersection ranges with the new intersection ranges.
        intersectionRanges.intersectWith(trackRanges);
    }
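    // For example, with audio buffered over [0, 10) and video over [0, 8), the intersection (and
    // thus the buffered attribute reported to script) is [0, 8) while readyState is not "ended".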

    // 5. If intersection ranges does not contain the exact same range information as the current value of this attribute,
    // then update the current value of this attribute to intersection ranges.
    m_buffered->ranges() = intersectionRanges;
    setBufferedDirty(true);
}

bool SourceBuffer::canPlayThroughRange(PlatformTimeRanges& ranges)
{
    if (isRemoved())
        return false;

    monitorBufferingRate();

    // Assuming no fluctuations in the buffering rate, loading 1 second per second or greater
    // means indefinite playback. This could be improved by taking jitter into account.
    if (m_averageBufferRate > 1)
        return true;

    // Add up all the time yet to be buffered.
    MediaTime currentTime = m_source->currentTime();
    MediaTime duration = m_source->duration();

    PlatformTimeRanges unbufferedRanges = ranges;
    unbufferedRanges.invert();
    unbufferedRanges.intersectWith(PlatformTimeRanges(currentTime, std::max(currentTime, duration)));
    MediaTime unbufferedTime = unbufferedRanges.totalDuration();
    if (!unbufferedTime.isValid())
        return true;

    MediaTime timeRemaining = duration - currentTime;
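    // Play-through is possible when the estimated time to fetch the remaining unbuffered media
    // (unbuffered seconds divided by the measured buffering rate) is less than the playback time left.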
    return unbufferedTime.toDouble() / m_averageBufferRate < timeRemaining.toDouble();
}

size_t SourceBuffer::extraMemoryCost() const
{
    size_t extraMemoryCost = m_pendingAppendData.capacity();
    for (auto& trackBuffer : m_trackBufferMap.values())
        extraMemoryCost += trackBuffer.samples.sizeInBytes();

    return extraMemoryCost;
}

void SourceBuffer::reportExtraMemoryAllocated()
{
    size_t extraMemoryCost = this->extraMemoryCost();
    if (extraMemoryCost <= m_reportedExtraMemoryCost)
        return;

    size_t extraMemoryCostDelta = extraMemoryCost - m_reportedExtraMemoryCost;
    m_reportedExtraMemoryCost = extraMemoryCost;

    JSC::JSLockHolder lock(scriptExecutionContext()->vm());
    // FIXME: Adopt reportExtraMemoryVisited, and switch to reportExtraMemoryAllocated.
    // https://bugs.webkit.org/show_bug.cgi?id=142595
    scriptExecutionContext()->vm().heap.deprecatedReportExtraMemory(extraMemoryCostDelta);
}
| 2244 | |
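// Returns a textual description of every buffered sample for the given track, in decode order
// (useful for debugging and testing).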
Vector<String> SourceBuffer::bufferedSamplesForTrackID(const AtomicString& trackID)
{
    auto it = m_trackBufferMap.find(trackID);
    if (it == m_trackBufferMap.end())
        return Vector<String>();

    TrackBuffer& trackBuffer = it->value;
    Vector<String> sampleDescriptions;
    for (auto& pair : trackBuffer.samples.decodeOrder())
        sampleDescriptions.append(toString(*pair.second));

    return sampleDescriptions;
}

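// Likewise a debugging aid: the platform SourceBufferPrivate keeps its own record of the
// samples it has enqueued for presentation.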
Vector<String> SourceBuffer::enqueuedSamplesForTrackID(const AtomicString& trackID)
{
    return m_private->enqueuedSamplesForTrackID(trackID);
}

Document& SourceBuffer::document() const
{
    ASSERT(scriptExecutionContext());
    return downcast<Document>(*scriptExecutionContext());
}

ExceptionOr<void> SourceBuffer::setMode(AppendMode newMode)
{
    // 3.1 Attributes - mode
    // http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode

    // On setting, run the following steps:

    // 1. Let new mode equal the new value being assigned to this attribute.
    // 2. If generate timestamps flag equals true and new mode equals "segments", then throw an InvalidAccessError exception and abort these steps.
    if (m_shouldGenerateTimestamps && newMode == AppendMode::Segments)
        return Exception { InvalidAccessError };

    // 3. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an InvalidStateError exception and abort these steps.
    // 4. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { InvalidStateError };

    // 5. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    if (m_source->isEnded()) {
        // 5.1. Set the readyState attribute of the parent media source to "open"
        // 5.2. Queue a task to fire a simple event named sourceopen at the parent media source.
        m_source->openIfInEndedState();
    }

    // 6. If the append state equals PARSING_MEDIA_SEGMENT, then throw an InvalidStateError and abort these steps.
    if (m_appendState == ParsingMediaSegment)
        return Exception { InvalidStateError };

    // 7. If the new mode equals "sequence", then set the group start timestamp to the group end timestamp.
    if (newMode == AppendMode::Sequence)
        m_groupStartTimestamp = m_groupEndTimestamp;

    // 8. Update the attribute to new mode.
    m_mode = newMode;

    return { };
}

#if !RELEASE_LOG_DISABLED
WTFLogChannel& SourceBuffer::logChannel() const
{
    return LogMediaSource;
}
#endif

} // namespace WebCore

#endif // ENABLE(MEDIA_SOURCE)