1/*
2 * Copyright (C) 2010 Google Inc. All rights reserved.
3 * Copyright (C) 2016 Apple Inc. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
18 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27
28#if ENABLE(WEB_AUDIO)
29
30#include "AudioContext.h"
31
32#include "AnalyserNode.h"
33#include "AsyncAudioDecoder.h"
34#include "AudioBuffer.h"
35#include "AudioBufferCallback.h"
36#include "AudioBufferSourceNode.h"
37#include "AudioListener.h"
38#include "AudioNodeInput.h"
39#include "AudioNodeOutput.h"
40#include "AudioSession.h"
41#include "BiquadFilterNode.h"
42#include "ChannelMergerNode.h"
43#include "ChannelSplitterNode.h"
44#include "ConvolverNode.h"
45#include "DefaultAudioDestinationNode.h"
46#include "DelayNode.h"
47#include "Document.h"
48#include "DynamicsCompressorNode.h"
49#include "EventNames.h"
50#include "FFTFrame.h"
51#include "Frame.h"
52#include "FrameLoader.h"
53#include "GainNode.h"
54#include "GenericEventQueue.h"
55#include "HRTFDatabaseLoader.h"
56#include "HRTFPanner.h"
57#include "JSDOMPromiseDeferred.h"
58#include "Logging.h"
59#include "NetworkingContext.h"
60#include "OfflineAudioCompletionEvent.h"
61#include "OfflineAudioDestinationNode.h"
62#include "OscillatorNode.h"
63#include "Page.h"
64#include "PannerNode.h"
65#include "PeriodicWave.h"
66#include "ScriptController.h"
67#include "ScriptProcessorNode.h"
68#include "WaveShaperNode.h"
69#include <JavaScriptCore/ScriptCallStack.h>
70
71#if ENABLE(MEDIA_STREAM)
72#include "MediaStream.h"
73#include "MediaStreamAudioDestinationNode.h"
74#include "MediaStreamAudioSource.h"
75#include "MediaStreamAudioSourceNode.h"
76#endif
77
78#if ENABLE(VIDEO)
79#include "HTMLMediaElement.h"
80#include "MediaElementAudioSourceNode.h"
81#endif
82
83#if DEBUG_AUDIONODE_REFERENCES
84#include <stdio.h>
85#endif
86
87#if USE(GSTREAMER)
88#include "GStreamerCommon.h"
89#endif
90
91#if PLATFORM(IOS_FAMILY)
92#include "ScriptController.h"
93#include "Settings.h"
94#endif
95
96#include <JavaScriptCore/ArrayBuffer.h>
97#include <wtf/Atomics.h>
98#include <wtf/IsoMallocInlines.h>
99#include <wtf/MainThread.h>
100#include <wtf/Ref.h>
101#include <wtf/RefCounted.h>
102#include <wtf/text/WTFString.h>
103
// Upper bound on the length of the Float32Arrays accepted by AudioContext::createPeriodicWave().
const unsigned MaxPeriodicWaveLength = 4096;
105
namespace WebCore {

WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContext);

// Release-logging helper: only logs when the owning page permits always-on logging.
#define RELEASE_LOG_IF_ALLOWED(fmt, ...) RELEASE_LOG_IF(document()->page() && document()->page()->isAlwaysOnLoggingAllowed(), Media, "%p - AudioContext::" fmt, this, ##__VA_ARGS__)
111
112bool AudioContext::isSampleRateRangeGood(float sampleRate)
113{
114 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
115 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
116 return sampleRate >= 44100 && sampleRate <= 96000;
117}
118
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Count of initialized realtime (non-offline) contexts: incremented in lazyInitialize(),
// decremented in uninitialize(). Checked by create() to enforce MaxHardwareContexts.
unsigned AudioContext::s_hardwareContextCount = 0;
122
123RefPtr<AudioContext> AudioContext::create(Document& document)
124{
125 ASSERT(isMainThread());
126 if (s_hardwareContextCount >= MaxHardwareContexts)
127 return nullptr;
128
129 RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
130 audioContext->suspendIfNeeded();
131 return audioContext;
132}
133
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document& document)
    : ActiveDOMObject(document)
#if !RELEASE_LOG_DISABLED
    , m_logger(document.logger())
    , m_logIdentifier(uniqueLogIdentifier())
#endif
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
{
    constructCommon();

    m_destinationNode = DefaultAudioDestinationNode::create(*this);

    // Initialize the destination node's muted state to match the page's current muted state.
    pageMutedStateDidChange();

    // Register with the document; the destructor undoes both registrations for realtime contexts.
    document.addAudioProducer(*this);
    document.registerForVisibilityStateChangedCallbacks(*this);
}
154
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(document)
#if !RELEASE_LOG_DISABLED
    , m_logger(document.logger())
    , m_logIdentifier(uniqueLogIdentifier())
#endif
    , m_isOfflineContext(true)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
{
    constructCommon();

    // Create a new destination for offline rendering.
    // NOTE(review): AudioBuffer::create() can return null for unsupported parameters (see
    // createBuffer() below); m_renderTarget is passed on unchecked — confirm the destination
    // node tolerates a null render target.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
}
172
// Shared initialization for both the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(*this);

    FFTFrame::initialize();

    m_listener = AudioListener::create();

    // Gate audio start on a user gesture when the document requires it.
    if (document()->audioPlaybackRequiresUserGesture())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;

#if PLATFORM(COCOA)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
192
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // stop() must already have run uninitialize() and clear() by the time we get here.
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(m_nodesToDelete.isEmpty());
    ASSERT(m_referencedNodes.isEmpty());
    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
    ASSERT(m_automaticPullNodes.isEmpty());
    // NOTE(review): this resize appears to exist only so the following assertion is meaningful
    // when an update was still pending at destruction time — confirm intent.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
    // FIXME: Can we assert that m_deferredFinishDerefList is empty?

    // Realtime contexts registered with the document in the constructor; undo that while the
    // script execution context is still alive.
    if (!isOfflineContext() && scriptExecutionContext()) {
        document()->removeAudioProducer(*this);
        document()->unregisterForVisibilityStateChangedCallbacks(*this);
    }
}
214
// Performs deferred one-time initialization: initializes the destination node and, for realtime
// contexts, starts the audio thread and claims a hardware-context slot. Idempotent.
void AudioContext::lazyInitialize()
{
    ASSERT(!m_isStopScheduled);

    if (m_isInitialized)
        return;

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            startRendering();
            ++s_hardwareContextCount;
        }
    }
    m_isInitialized = true;
}
241
// Final teardown step (called from stop() after uninitialize()): drops the destination node,
// drains all pending node deletions on this thread, and releases the pending-activity token.
void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode = nullptr;

    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    // Deleting nodes can mark more nodes for deletion, so loop until both lists drain.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());

    // It was set in constructCommon.
    unsetPendingActivity(*this);
}
258
// Stops the audio thread and all rendering; the inverse of lazyInitialize(). Idempotent.
void AudioContext::uninitialize()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        // Release the hardware-context slot claimed in lazyInitialize().
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;

        // Offline contexts move to 'Closed' state when dispatching the completion event.
        setState(State::Closed);
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
287
// True between lazyInitialize() and uninitialize().
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
292
// Queues |promise| to be resolved when the context transitions to |state| (see setState()).
void AudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
{
    size_t stateIndex = static_cast<size_t>(state);
    // Grow the per-state reaction table on demand.
    if (stateIndex >= m_stateReactions.size())
        m_stateReactions.grow(stateIndex + 1);

    m_stateReactions[stateIndex].append(WTFMove(promise));
}
301
// Transitions the context state, fires a "statechange" event, and resolves any promises
// registered for the new state via addReaction(). No-op when the state is unchanged.
void AudioContext::setState(State state)
{
    if (m_state == state)
        return;

    m_state = state;
    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, Event::CanBubble::Yes, Event::IsCancelable::No));

    size_t stateIndex = static_cast<size_t>(state);
    if (stateIndex >= m_stateReactions.size())
        return;

    // Swap the pending reactions out first so resolving a promise cannot mutate the list
    // we are iterating.
    Vector<DOMPromiseDeferred<void>> reactions;
    m_stateReactions[stateIndex].swap(reactions);

    for (auto& promise : reactions)
        promise.resolve();
}
320
// ActiveDOMObject teardown entry point: closes the event queue and tears the context down.
void AudioContext::stop()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    document()->updateIsPlayingMedia();

    m_eventQueue->close();

    uninitialize();
    clear();
}
339
// Only a context that is not actively rendering may be suspended with the document.
bool AudioContext::canSuspendForDocumentSuspension() const
{
    // FIXME: We should be able to suspend while rendering as well with some more code.
    return m_state == State::Suspended || m_state == State::Closed;
}
345
// Debug name reported through the ActiveDOMObject interface.
const char* AudioContext::activeDOMObjectName() const
{
    return "AudioContext";
}
350
// The owning script execution context, downcast to Document. The assertion is debug-only;
// callers still null-check the result since the context can be cleared at teardown.
Document* AudioContext::document() const
{
    ASSERT(m_scriptExecutionContext);
    return downcast<Document>(m_scriptExecutionContext);
}
356
// PlatformMediaSessionClient hook; same document as document(), without the debug assertion.
Document* AudioContext::hostingDocument() const
{
    return downcast<Document>(m_scriptExecutionContext);
}
361
362String AudioContext::sourceApplicationIdentifier() const
363{
364 Document* document = this->document();
365 if (Frame* frame = document ? document->frame() : nullptr) {
366 if (NetworkingContext* networkingContext = frame->loader().networkingContext())
367 return networkingContext->sourceApplicationIdentifier();
368 }
369 return emptyString();
370}
371
372bool AudioContext::processingUserGestureForMedia() const
373{
374 return document() ? document()->processingUserGestureForMedia() : false;
375}
376
377bool AudioContext::isSuspended() const
378{
379 return !document() || document()->activeDOMObjectsAreSuspended() || document()->activeDOMObjectsAreStopped();
380}
381
// Suspends playback (via a media-session interruption) when the page is backgrounded and
// resumes it when the page returns to the foreground.
void AudioContext::visibilityStateChanged()
{
    // Do not suspend if audio is audible.
    if (mediaState() == MediaProducer::IsPlayingAudio || m_isStopScheduled)
        return;

    // NOTE(review): document() is not null-checked here, unlike in isSuspended() — confirm it
    // cannot be null once m_isStopScheduled has been ruled out.
    if (document()->hidden()) {
        if (state() == State::Running) {
            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Suspending playback after going to the background");
            m_mediaSession->beginInterruption(PlatformMediaSession::EnteringBackground);
        }
    } else {
        if (state() == State::Interrupted) {
            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Resuming playback after entering foreground");
            m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
        }
    }
}
400
401bool AudioContext::wouldTaintOrigin(const URL& url) const
402{
403 if (url.protocolIsData())
404 return false;
405
406 if (auto* document = this->document())
407 return !document->securityOrigin().canRequest(url);
408
409 return false;
410}
411
412ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
413{
414 auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
415 if (!audioBuffer)
416 return Exception { NotSupportedError };
417 return audioBuffer.releaseNonNull();
418}
419
420ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
421{
422 auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
423 if (!audioBuffer)
424 return Exception { SyntaxError };
425 return audioBuffer.releaseNonNull();
426}
427
// Kicks off asynchronous decoding of |audioData|; one of the two callbacks is invoked later.
void AudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
{
    m_audioDecoder.decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
}
432
// Creates an AudioBufferSourceNode. Fails with InvalidStateError once teardown is scheduled.
ExceptionOr<Ref<AudioBufferSourceNode>> AudioContext::createBufferSource()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    lazyInitialize();
    Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, m_destinationNode->sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node);

    return node;
}
451
452#if ENABLE(VIDEO)
453
// Creates a source node wrapping |mediaElement|. Fails if teardown is scheduled or the
// element is already captured by another source node.
ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (m_isStopScheduled || mediaElement.audioSourceNode())
        return Exception { InvalidStateError };

    lazyInitialize();

    auto node = MediaElementAudioSourceNode::create(*this, mediaElement);

    // Link the element back to its source node so a second capture attempt is rejected above.
    mediaElement.setAudioSourceNode(node.ptr());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
472
473#endif
474
475#if ENABLE(MEDIA_STREAM)
476
// Creates a source node for |mediaStream|, using the first audio track that exposes an
// AudioSourceProvider. Fails when no suitable track exists or teardown is scheduled.
ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSource(MediaStream& mediaStream)
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    auto audioTracks = mediaStream.getAudioTracks();
    if (audioTracks.isEmpty())
        return Exception { InvalidStateError };

    // Pick the first track that can actually provide audio samples.
    MediaStreamTrack* providerTrack = nullptr;
    for (auto& track : audioTracks) {
        if (track->audioSourceProvider()) {
            providerTrack = track.get();
            break;
        }
    }
    if (!providerTrack)
        return Exception { InvalidStateError };

    lazyInitialize();

    auto node = MediaStreamAudioSourceNode::create(*this, mediaStream, *providerTrack);
    node->setFormat(2, sampleRate());

    refNode(node); // context keeps reference until node is disconnected
    return node;
}
508
// Creates a mono MediaStream destination node.
ExceptionOr<Ref<MediaStreamAudioDestinationNode>> AudioContext::createMediaStreamDestination()
{
    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(*this, 1);
}
518
519#endif
520
// Creates a ScriptProcessorNode, validating bufferSize and channel counts per the Web Audio spec.
// bufferSize == 0 lets the implementation pick a power of two; otherwise it must be one of the
// listed power-of-2 values. Channel counts are capped at maxNumberOfChannels() and may not
// both be zero.
ExceptionOr<Ref<ScriptProcessorNode>> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    lazyInitialize();

    // W3C Editor's Draft 06 June 2017
    // https://webaudio.github.io/web-audio-api/#widl-BaseAudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels

    // The bufferSize parameter determines the buffer size in units of sample-frames. If it's not passed in,
    // or if the value is 0, then the implementation will choose the best buffer size for the given environment,
    // which will be constant power of 2 throughout the lifetime of the node. ... If the value of this parameter
    // is not one of the allowed power-of-2 values listed above, an IndexSizeError must be thrown.
    switch (bufferSize) {
    case 0:
#if USE(AUDIO_SESSION)
        // Pick a value between 256 (2^8) and 16384 (2^14), based on the buffer size of the current AudioSession:
        bufferSize = 1 << std::max<size_t>(8, std::min<size_t>(14, std::log2(AudioSession::sharedSession().bufferSize())));
#else
        bufferSize = 2048;
#endif
        break;
    case 256:
    case 512:
    case 1024:
    case 2048:
    case 4096:
    case 8192:
    case 16384:
        break;
    default:
        return Exception { IndexSizeError };
    }

    // An IndexSizeError exception must be thrown if bufferSize or numberOfInputChannels or numberOfOutputChannels
    // are outside the valid range. It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero.
    // In this case an IndexSizeError must be thrown.

    if (!numberOfInputChannels && !numberOfOutputChannels)
        return Exception { NotSupportedError };

    // This parameter [numberOfInputChannels] determines the number of channels for this node's input. Values of
    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.

    if (numberOfInputChannels > maxNumberOfChannels())
        return Exception { NotSupportedError };

    // This parameter [numberOfOutputChannels] determines the number of channels for this node's output. Values of
    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.

    if (numberOfOutputChannels > maxNumberOfChannels())
        return Exception { NotSupportedError };

    auto node = ScriptProcessorNode::create(*this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);

    refNode(node); // context keeps reference until we stop making javascript rendering callbacks
    return node;
}
584
585ExceptionOr<Ref<BiquadFilterNode>> AudioContext::createBiquadFilter()
586{
587 ALWAYS_LOG(LOGIDENTIFIER);
588
589 ASSERT(isMainThread());
590 if (m_isStopScheduled)
591 return Exception { InvalidStateError };
592
593 lazyInitialize();
594
595 return BiquadFilterNode::create(*this, m_destinationNode->sampleRate());
596}
597
598ExceptionOr<Ref<WaveShaperNode>> AudioContext::createWaveShaper()
599{
600 ALWAYS_LOG(LOGIDENTIFIER);
601
602 ASSERT(isMainThread());
603 if (m_isStopScheduled)
604 return Exception { InvalidStateError };
605
606 lazyInitialize();
607 return WaveShaperNode::create(*this);
608}
609
610ExceptionOr<Ref<PannerNode>> AudioContext::createPanner()
611{
612 ALWAYS_LOG(LOGIDENTIFIER);
613
614 ASSERT(isMainThread());
615 if (m_isStopScheduled)
616 return Exception { InvalidStateError };
617
618 lazyInitialize();
619 return PannerNode::create(*this, m_destinationNode->sampleRate());
620}
621
622ExceptionOr<Ref<ConvolverNode>> AudioContext::createConvolver()
623{
624 ALWAYS_LOG(LOGIDENTIFIER);
625
626 ASSERT(isMainThread());
627 if (m_isStopScheduled)
628 return Exception { InvalidStateError };
629
630 lazyInitialize();
631 return ConvolverNode::create(*this, m_destinationNode->sampleRate());
632}
633
634ExceptionOr<Ref<DynamicsCompressorNode>> AudioContext::createDynamicsCompressor()
635{
636 ALWAYS_LOG(LOGIDENTIFIER);
637
638 ASSERT(isMainThread());
639 if (m_isStopScheduled)
640 return Exception { InvalidStateError };
641
642 lazyInitialize();
643 return DynamicsCompressorNode::create(*this, m_destinationNode->sampleRate());
644}
645
646ExceptionOr<Ref<AnalyserNode>> AudioContext::createAnalyser()
647{
648 ALWAYS_LOG(LOGIDENTIFIER);
649
650 ASSERT(isMainThread());
651 if (m_isStopScheduled)
652 return Exception { InvalidStateError };
653
654 lazyInitialize();
655 return AnalyserNode::create(*this, m_destinationNode->sampleRate());
656}
657
658ExceptionOr<Ref<GainNode>> AudioContext::createGain()
659{
660 ALWAYS_LOG(LOGIDENTIFIER);
661
662 ASSERT(isMainThread());
663 if (m_isStopScheduled)
664 return Exception { InvalidStateError };
665
666 lazyInitialize();
667 return GainNode::create(*this, m_destinationNode->sampleRate());
668}
669
670ExceptionOr<Ref<DelayNode>> AudioContext::createDelay(double maxDelayTime)
671{
672 ALWAYS_LOG(LOGIDENTIFIER);
673
674 ASSERT(isMainThread());
675 if (m_isStopScheduled)
676 return Exception { InvalidStateError };
677
678 lazyInitialize();
679 return DelayNode::create(*this, m_destinationNode->sampleRate(), maxDelayTime);
680}
681
682ExceptionOr<Ref<ChannelSplitterNode>> AudioContext::createChannelSplitter(size_t numberOfOutputs)
683{
684 ALWAYS_LOG(LOGIDENTIFIER);
685
686 ASSERT(isMainThread());
687 if (m_isStopScheduled)
688 return Exception { InvalidStateError };
689
690 lazyInitialize();
691 auto node = ChannelSplitterNode::create(*this, m_destinationNode->sampleRate(), numberOfOutputs);
692 if (!node)
693 return Exception { IndexSizeError };
694 return node.releaseNonNull();
695}
696
697ExceptionOr<Ref<ChannelMergerNode>> AudioContext::createChannelMerger(size_t numberOfInputs)
698{
699 ALWAYS_LOG(LOGIDENTIFIER);
700
701 ASSERT(isMainThread());
702 if (m_isStopScheduled)
703 return Exception { InvalidStateError };
704
705 lazyInitialize();
706 auto node = ChannelMergerNode::create(*this, m_destinationNode->sampleRate(), numberOfInputs);
707 if (!node)
708 return Exception { IndexSizeError };
709 return node.releaseNonNull();
710}
711
712ExceptionOr<Ref<OscillatorNode>> AudioContext::createOscillator()
713{
714 ALWAYS_LOG(LOGIDENTIFIER);
715
716 ASSERT(isMainThread());
717 if (m_isStopScheduled)
718 return Exception { InvalidStateError };
719
720 lazyInitialize();
721
722 Ref<OscillatorNode> node = OscillatorNode::create(*this, m_destinationNode->sampleRate());
723
724 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
725 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
726 refNode(node);
727
728 return node;
729}
730
731ExceptionOr<Ref<PeriodicWave>> AudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
732{
733 ALWAYS_LOG(LOGIDENTIFIER);
734
735 ASSERT(isMainThread());
736 if (m_isStopScheduled)
737 return Exception { InvalidStateError };
738
739 if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
740 return Exception { IndexSizeError };
741 lazyInitialize();
742 return PeriodicWave::create(sampleRate(), real, imaginary);
743}
744
// Audio thread: records that a scheduled source finished; the deref happens later in
// derefFinishedSourceNodes() under the graph lock.
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
750
// Releases the context's connection references to all sources that reported completion
// via notifyNodeFinishedProcessing(). Requires the graph lock.
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());
    for (auto& node : m_finishedNodes)
        derefNode(*node);

    m_finishedNodes.clear();
}
760
// Takes a connection reference on |node| and tracks it in m_referencedNodes,
// keeping the node alive until derefNode() is called. Main thread only.
void AudioContext::refNode(AudioNode& node)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);

    node.ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(&node);
}
769
// Releases the connection reference taken by refNode(). Requires the graph lock.
void AudioContext::derefNode(AudioNode& node)
{
    ASSERT(isGraphOwner());

    node.deref(AudioNode::RefTypeConnection);

    ASSERT(m_referencedNodes.contains(&node));
    m_referencedNodes.removeFirst(&node);
}
779
// Teardown path (after the audio thread has finished): drops the connection reference
// on every still-referenced node at once.
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (auto& node : m_referencedNodes)
        node->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
788
// Acquires the graph mutex on the main thread. Reentrant: if this thread already owns
// the graph, |mustReleaseLock| is set to false and the caller must not unlock.
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    Thread& thisThread = Thread::current();

    if (&thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = &thisThread;
        mustReleaseLock = true;
    }
}
806
// Non-blocking graph-lock acquisition for the real-time audio thread. Returns whether the
// lock is held on return; |mustReleaseLock| tells the caller whether it must unlock.
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    Thread& thisThread = Thread::current();
    bool isAudioThread = &thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (&thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = &thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
839
// Releases the graph mutex. Must be called by the thread recorded as graph owner,
// and the owner must be cleared before the mutex is released.
void AudioContext::unlock()
{
    ASSERT(m_graphOwnerThread == &Thread::current());

    m_graphOwnerThread = nullptr;
    m_contextGraphMutex.unlock();
}
847
// True when called from the rendering (audio) thread.
bool AudioContext::isAudioThread() const
{
    return m_audioThread == &Thread::current();
}
852
// True when the current thread holds the graph lock (see lock()/tryLock()).
bool AudioContext::isGraphOwner() const
{
    return m_graphOwnerThread == &Thread::current();
}
857
// Audio thread: queues a node whose finish-deref must run later under the graph lock
// (processed by handleDeferredFinishDerefs()).
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
863
// Audio thread, before each render quantum: opportunistically syncs graph state changes
// made on the main thread. Skipping a quantum when the lock is contended is harmless.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
882
// Audio thread, after each render quantum: finishes deferred derefs, releases finished
// sources, hands node deletion to the main thread, and syncs dirty graph state.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
912
// Completes the finish-derefs queued by addDeferredFinishDeref(). Audio thread, graph lock held.
void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (auto& node : m_deferredFinishDerefList)
        node->finishDeref(AudioNode::RefTypeConnection);

    m_deferredFinishDerefList.clear();
}
921
// Queues |node| for deletion: directly onto m_nodesToDelete once the audio thread is gone,
// otherwise onto m_nodesMarkedForDeletion for scheduleNodeDeletion() to pick up.
void AudioContext::markForDeletion(AudioNode& node)
{
    ASSERT(isGraphOwner());

    if (isAudioThreadFinished())
        m_nodesToDelete.append(&node);
    else
        m_nodesMarkedForDeletion.append(&node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
937
// Moves nodes marked for deletion onto m_nodesToDelete and schedules deleteMarkedNodes()
// on the main thread (at most one pending hop at a time, guarded by m_isDeletionScheduled).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        callOnMainThread([protectedThis = makeRef(*this)]() mutable {
            protectedThis->deleteMarkedNodes();
        });
    }
}
957
// Main thread: deletes every node queued on m_nodesToDelete under the graph lock, first
// scrubbing the nodes' inputs/outputs from the dirty sets so stale pointers are never visited.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protectedThis(*this);
    {
        AutoLocker locker(*this);

        while (m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete.takeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}
986
// Flags a summing junction for a rendering-state refresh in handleDirtyAudioSummingJunctions().
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
992
// Main thread: unmarks a summing junction (e.g. because it is going away) under the graph lock.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);
    m_dirtySummingJunctions.remove(summingJunction);
}
999
// Flags a node output for a rendering-state refresh in handleDirtyAudioNodeOutputs().
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
1005
// Applies pending rendering-state updates to every dirty summing junction,
// then empties the dirty set. Caller must hold the graph lock.
void AudioContext::handleDirtyAudioSummingJunctions()
{
    ASSERT(isGraphOwner());

    for (auto& junction : m_dirtySummingJunctions)
        junction->updateRenderingState();

    m_dirtySummingJunctions.clear();
}
1015
// Applies pending rendering-state updates to every dirty node output, then
// empties the dirty set. Caller must hold the graph lock.
void AudioContext::handleDirtyAudioNodeOutputs()
{
    ASSERT(isGraphOwner());

    for (auto& output : m_dirtyAudioNodeOutputs)
        output->updateRenderingState();

    m_dirtyAudioNodeOutputs.clear();
}
1025
1026void AudioContext::addAutomaticPullNode(AudioNode& node)
1027{
1028 ASSERT(isGraphOwner());
1029
1030 if (m_automaticPullNodes.add(&node).isNewEntry)
1031 m_automaticPullNodesNeedUpdating = true;
1032}
1033
1034void AudioContext::removeAutomaticPullNode(AudioNode& node)
1035{
1036 ASSERT(isGraphOwner());
1037
1038 if (m_automaticPullNodes.remove(&node))
1039 m_automaticPullNodesNeedUpdating = true;
1040}
1041
1042void AudioContext::updateAutomaticPullNodes()
1043{
1044 ASSERT(isGraphOwner());
1045
1046 if (m_automaticPullNodesNeedUpdating) {
1047 // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
1048 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
1049
1050 unsigned i = 0;
1051 for (auto& output : m_automaticPullNodes)
1052 m_renderingAutomaticPullNodes[i++] = output;
1053
1054 m_automaticPullNodesNeedUpdating = false;
1055 }
1056}
1057
// Called on the audio thread each render quantum so nodes without connected
// outputs still get processed. Iterates the rendering-thread snapshot built by
// updateAutomaticPullNodes().
void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
    ASSERT(isAudioThread());

    for (auto& node : m_renderingAutomaticPullNodes)
        node->processIfNecessary(framesToProcess);
}
1065
// Forwards to the ActiveDOMObject implementation; may return null once the
// context has been torn down.
ScriptExecutionContext* AudioContext::scriptExecutionContext() const
{
    return ActiveDOMObject::scriptExecutionContext();
}
1070
void AudioContext::nodeWillBeginPlayback()
{
    // Called by scheduled AudioNodes when clients schedule their start times.
    // Prior to the introduction of suspend(), resume(), and stop(), starting
    // a scheduled AudioNode would remove the user-gesture restriction, if present,
    // and would thus unmute the context. Now that AudioContext stays in the
    // "suspended" state if a user-gesture restriction is present, starting a
    // schedule AudioNode should set the state to "running", but only if the
    // user-gesture restriction is set.
    if (userGestureRequiredForAudioStart())
        startRendering(); // startRendering() re-validates the gesture via willBeginPlayback().
}
1083
1084bool AudioContext::willBeginPlayback()
1085{
1086 if (userGestureRequiredForAudioStart()) {
1087 if (!processingUserGestureForMedia() && !document()->isCapturing()) {
1088 ALWAYS_LOG(LOGIDENTIFIER, "returning false, not processing user gesture or capturing");
1089 return false;
1090 }
1091 removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
1092 }
1093
1094 if (pageConsentRequiredForAudioStart()) {
1095 Page* page = document()->page();
1096 if (page && !page->canStartMedia()) {
1097 document()->addMediaCanStartListener(*this);
1098 ALWAYS_LOG(LOGIDENTIFIER, "returning false, page doesn't allow media to start");
1099 return false;
1100 }
1101 removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
1102 }
1103
1104 auto willBegin = m_mediaSession->clientWillBeginPlayback();
1105 ALWAYS_LOG(LOGIDENTIFIER, "returning ", willBegin);
1106
1107 return willBegin;
1108}
1109
1110bool AudioContext::willPausePlayback()
1111{
1112 if (userGestureRequiredForAudioStart()) {
1113 if (!processingUserGestureForMedia())
1114 return false;
1115 removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
1116 }
1117
1118 if (pageConsentRequiredForAudioStart()) {
1119 Page* page = document()->page();
1120 if (page && !page->canStartMedia()) {
1121 document()->addMediaCanStartListener(*this);
1122 return false;
1123 }
1124 removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
1125 }
1126
1127 return m_mediaSession->clientWillPausePlayback();
1128}
1129
// Starts the destination node rendering and moves the context to "running",
// unless teardown is scheduled or willBeginPlayback() vetoes the start.
void AudioContext::startRendering()
{
    ALWAYS_LOG(LOGIDENTIFIER);
    if (m_isStopScheduled || !willBeginPlayback())
        return;

    destination()->startRendering();
    setState(State::Running);
}
1139
// MediaCanStartListener callback: the page now permits media, so drop the
// page-consent restriction and attempt to resume playback.
void AudioContext::mediaCanStart(Document& document)
{
    ASSERT_UNUSED(document, &document == this->document());
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    mayResumePlayback(true);
}
1146
1147MediaProducer::MediaStateFlags AudioContext::mediaState() const
1148{
1149 if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
1150 return MediaProducer::IsPlayingAudio;
1151
1152 return MediaProducer::IsNotPlaying;
1153}
1154
1155void AudioContext::pageMutedStateDidChange()
1156{
1157 if (m_destinationNode && document()->page())
1158 m_destinationNode->setMuted(document()->page()->isAudioMuted());
1159}
1160
// Notifies the document that this context's "is playing audio" state changed.
void AudioContext::isPlayingAudioDidChange()
{
    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
    // we could be on the audio I/O thread here and the call into WebCore could block.
    callOnMainThread([protectedThis = makeRef(*this)] {
        // The document may have gone away by the time the task runs.
        if (protectedThis->document())
            protectedThis->document()->updateIsPlayingMedia();
    });
}
1170
// Offline rendering finished: close the context and dispatch an
// OfflineAudioCompletionEvent carrying the rendered buffer.
void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    if (!isMainThread())
        return;

    ALWAYS_LOG(LOGIDENTIFIER);

    AudioBuffer* renderedBuffer = m_renderTarget.get();
    // An offline context renders exactly once, so completion implies Closed.
    setState(State::Closed);

    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;

    // Avoid firing the event if the document has already gone away.
    if (!m_isStopScheduled) {
        // Call the offline rendering completion event listener.
        m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    }
}
1192
// Bumps the count of currently-active source nodes.
// NOTE(review): looks callable from the audio thread as well as the main
// thread — confirm m_activeSourceCount is an atomic type.
void AudioContext::incrementActiveSourceCount()
{
    ++m_activeSourceCount;
}
1197
// Counterpart of incrementActiveSourceCount(); same threading caveat applies.
void AudioContext::decrementActiveSourceCount()
{
    --m_activeSourceCount;
}
1202
// Implements AudioContext.suspend(): rejects for offline or stopping contexts,
// resolves immediately when already suspended, otherwise queues the promise to
// settle once the state actually reaches Suspended.
void AudioContext::suspend(DOMPromiseDeferred<void>&& promise)
{
    if (isOfflineContext() || m_isStopScheduled) {
        promise.reject(InvalidStateError);
        return;
    }

    if (m_state == State::Suspended) {
        promise.resolve();
        return;
    }

    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
        promise.reject();
        return;
    }

    // Queue the promise before attempting the transition; presumably it is
    // settled when the state reaches Suspended (addReaction is defined elsewhere).
    addReaction(State::Suspended, WTFMove(promise));

    // If a restriction blocks pausing now, leave the reaction queued for a
    // later transition rather than rejecting.
    if (!willPausePlayback())
        return;

    lazyInitialize();

    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
        setState(State::Suspended);
    });
}
1231
// Implements AudioContext.resume(): rejects for offline or stopping contexts,
// resolves immediately when already running, otherwise queues the promise to
// settle once the state actually reaches Running.
void AudioContext::resume(DOMPromiseDeferred<void>&& promise)
{
    if (isOfflineContext() || m_isStopScheduled) {
        promise.reject(InvalidStateError);
        return;
    }

    if (m_state == State::Running) {
        promise.resolve();
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.reject();
        return;
    }

    // Queue the promise before attempting the transition; it is settled when
    // the state reaches Running.
    addReaction(State::Running, WTFMove(promise));

    // Gesture/consent restrictions may veto starting now; the queued reaction
    // survives for a later transition.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
        setState(State::Running);
    });
}
1260
// Implements AudioContext.close(): rejects for offline or stopping contexts,
// resolves immediately when already closed, otherwise queues the promise and
// shuts the destination down, uninitializing the context when done.
void AudioContext::close(DOMPromiseDeferred<void>&& promise)
{
    if (isOfflineContext() || m_isStopScheduled) {
        promise.reject(InvalidStateError);
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.resolve();
        return;
    }

    // The promise is settled when the state reaches Closed.
    addReaction(State::Closed, WTFMove(promise));

    lazyInitialize();

    m_destinationNode->close([this, protectedThis = makeRef(*this)] {
        setState(State::Closed);
        uninitialize();
    });
}
1282
1283
// Called by the platform media session when playback must stop (e.g. a system
// interruption). Unlike suspend(), this is not promise-based and may put the
// context into Interrupted rather than Suspended.
void AudioContext::suspendPlayback()
{
    if (!m_destinationNode || m_state == State::Closed)
        return;

    if (m_state == State::Suspended) {
        // Already suspended; just record that an interruption is in progress.
        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
            setState(State::Interrupted);
        return;
    }

    lazyInitialize();

    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
        // Re-check the session state once the destination has actually stopped;
        // the interruption may have begun or ended in the meantime.
        bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
        setState(interrupted ? State::Interrupted : State::Suspended);
    });
}
1302
// Called when an interruption ends or the page allows media again.
// shouldResume == false means settle in Suspended instead of resuming.
void AudioContext::mayResumePlayback(bool shouldResume)
{
    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
        return;

    if (!shouldResume) {
        setState(State::Suspended);
        return;
    }

    // Gesture/consent restrictions may still veto resuming.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
        setState(State::Running);
    });
}
1322
// Posts a task to the context's script execution context, silently dropping
// it once stop has been scheduled (the context is being torn down).
void AudioContext::postTask(WTF::Function<void()>&& task)
{
    if (m_isStopScheduled)
        return;

    m_scriptExecutionContext->postTask(WTFMove(task));
}
1330
1331const SecurityOrigin* AudioContext::origin() const
1332{
1333 return m_scriptExecutionContext ? m_scriptExecutionContext->securityOrigin() : nullptr;
1334}
1335
1336void AudioContext::addConsoleMessage(MessageSource source, MessageLevel level, const String& message)
1337{
1338 if (m_scriptExecutionContext)
1339 m_scriptExecutionContext->addConsoleMessage(source, level, message);
1340}
1341
1342#if !RELEASE_LOG_DISABLED
// Routes this object's release logging to the shared Media log channel.
WTFLogChannel& AudioContext::logChannel() const
{
    return LogMedia;
}
1347#endif
1348
1349} // namespace WebCore
1350
1351#endif // ENABLE(WEB_AUDIO)
1352