/*
 * Copyright (C) 2010 Google Inc. All rights reserved.
 * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ActiveDOMObject.h"
#include "AsyncAudioDecoder.h"
#include "AudioBus.h"
#include "AudioDestinationNode.h"
#include "EventTarget.h"
#include "JSDOMPromiseDeferred.h"
#include "MediaCanStartListener.h"
#include "MediaProducer.h"
#include "PlatformMediaSession.h"
#include "ScriptExecutionContext.h"
#include "VisibilityChangeClient.h"
#include <JavaScriptCore/ConsoleTypes.h>
#include <JavaScriptCore/Float32Array.h>
#include <atomic>
#include <wtf/HashSet.h>
#include <wtf/LoggerHelper.h>
#include <wtf/MainThread.h>
#include <wtf/RefPtr.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Threading.h>
#include <wtf/Vector.h>
#include <wtf/text/AtomicStringHash.h>

namespace WebCore {

class AnalyserNode;
class AudioBuffer;
class AudioBufferCallback;
class AudioBufferSourceNode;
class AudioListener;
class AudioSummingJunction;
class BiquadFilterNode;
class ChannelMergerNode;
class ChannelSplitterNode;
class ConvolverNode;
class DelayNode;
class Document;
class DynamicsCompressorNode;
class GainNode;
class GenericEventQueue;
class HTMLMediaElement;
class MediaElementAudioSourceNode;
class MediaStream;
class MediaStreamAudioDestinationNode;
class MediaStreamAudioSourceNode;
class OscillatorNode;
class PannerNode;
class PeriodicWave;
class ScriptProcessorNode;
class SecurityOrigin;
class WaveShaperNode;

// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
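//
// A minimal usage sketch (hypothetical caller code; real call sites live in the JS
// bindings and elsewhere in WebCore). Note that the create*() methods below return
// ExceptionOr<Ref<...>>, so callers must check for exceptions:
//
//     RefPtr<AudioContext> context = AudioContext::create(document);
//     auto gainResult = context->createGain(); // ExceptionOr<Ref<GainNode>>
//     if (!gainResult.hasException())
//         Ref<GainNode> gain = gainResult.releaseReturnValue();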

class AudioContext
    : public ActiveDOMObject
    , public ThreadSafeRefCounted<AudioContext>
    , public EventTargetWithInlineData
    , public MediaCanStartListener
    , public MediaProducer
    , private PlatformMediaSessionClient
    , private VisibilityChangeClient
#if !RELEASE_LOG_DISABLED
    , private LoggerHelper
#endif
{
    WTF_MAKE_ISO_ALLOCATED(AudioContext);
public:
    // Create an AudioContext for rendering to the audio hardware.
    static RefPtr<AudioContext> create(Document&);

    virtual ~AudioContext();

    bool isInitialized() const;

    bool isOfflineContext() const { return m_isOfflineContext; }

    Document* document() const; // ASSERTs if document no longer exists.

    Document* hostingDocument() const final;

    AudioDestinationNode* destination() { return m_destinationNode.get(); }
    size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
    double currentTime() const { return m_destinationNode->currentTime(); }
    float sampleRate() const { return m_destinationNode->sampleRate(); }
    unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }

    void incrementActiveSourceCount();
    void decrementActiveSourceCount();

    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
    ExceptionOr<Ref<AudioBuffer>> createBuffer(ArrayBuffer&, bool mixToMono);

    // Asynchronous audio file data decoding.
    void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&);
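
    // A sketch of decodeAudioData()'s intended flow (names are illustrative; the
    // AudioBufferCallback wrappers are supplied by the JS bindings):
    //
    //     context.decodeAudioData(WTFMove(audioData), WTFMove(successCallback), WTFMove(errorCallback));
    //
    // Decoding itself runs on m_audioDecoder's background thread; the success
    // callback is later invoked on the main thread with the decoded AudioBuffer.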

    AudioListener* listener() { return m_listener.get(); }

    using ActiveDOMObject::suspend;
    using ActiveDOMObject::resume;

    void suspend(DOMPromiseDeferred<void>&&);
    void resume(DOMPromiseDeferred<void>&&);
    void close(DOMPromiseDeferred<void>&&);
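
    // Each of suspend()/resume()/close() settles its promise once the requested
    // state transition completes; pending promises are queued per target state
    // (see addReaction() and m_stateReactions below). A sketch of a caller:
    //
    //     context.suspend(WTFMove(promise)); // resolves once state() == State::Suspended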

    enum class State { Suspended, Running, Interrupted, Closed };
    State state() const;
    bool isClosed() const { return m_state == State::Closed; }

    bool wouldTaintOrigin(const URL&) const;

    // The AudioNode create methods are called on the main thread (from JavaScript).
    ExceptionOr<Ref<AudioBufferSourceNode>> createBufferSource();
#if ENABLE(VIDEO)
    ExceptionOr<Ref<MediaElementAudioSourceNode>> createMediaElementSource(HTMLMediaElement&);
#endif
#if ENABLE(MEDIA_STREAM)
    ExceptionOr<Ref<MediaStreamAudioSourceNode>> createMediaStreamSource(MediaStream&);
    ExceptionOr<Ref<MediaStreamAudioDestinationNode>> createMediaStreamDestination();
#endif
    ExceptionOr<Ref<GainNode>> createGain();
    ExceptionOr<Ref<BiquadFilterNode>> createBiquadFilter();
    ExceptionOr<Ref<WaveShaperNode>> createWaveShaper();
    ExceptionOr<Ref<DelayNode>> createDelay(double maxDelayTime);
    ExceptionOr<Ref<PannerNode>> createPanner();
    ExceptionOr<Ref<ConvolverNode>> createConvolver();
    ExceptionOr<Ref<DynamicsCompressorNode>> createDynamicsCompressor();
    ExceptionOr<Ref<AnalyserNode>> createAnalyser();
    ExceptionOr<Ref<ScriptProcessorNode>> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels);
    ExceptionOr<Ref<ChannelSplitterNode>> createChannelSplitter(size_t numberOfOutputs);
    ExceptionOr<Ref<ChannelMergerNode>> createChannelMerger(size_t numberOfInputs);
    ExceptionOr<Ref<OscillatorNode>> createOscillator();
    ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Float32Array& real, Float32Array& imaginary);
    // When a source node has no more processing to do (it has finished playing), it tells the context to dereference it.
    void notifyNodeFinishedProcessing(AudioNode*);

    // Called at the start of each render quantum.
    void handlePreRenderTasks();

    // Called at the end of each render quantum.
    void handlePostRenderTasks();

    // Called periodically at the end of each render quantum to dereference finished source nodes.
    void derefFinishedSourceNodes();

    // We schedule deletion of all marked nodes at the end of each realtime render quantum.
    void markForDeletion(AudioNode&);
    void deleteMarkedNodes();

    // AudioContext can pull node(s) at the end of each render quantum even when they are not connected to any downstream nodes.
    // These two methods are called by nodes that want to add/remove themselves to/from the automatic pull lists.
    void addAutomaticPullNode(AudioNode&);
    void removeAutomaticPullNode(AudioNode&);

    // Called right before handlePostRenderTasks() to handle nodes which need to be pulled even when they are not connected to anything.
    void processAutomaticPullNodes(size_t framesToProcess);
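
    // A sketch of the per-quantum call sequence on the audio thread (the calls are
    // driven by the destination node's render callback):
    //
    //     handlePreRenderTasks();
    //     // ... pull and process the rendering graph for this quantum ...
    //     processAutomaticPullNodes(framesToProcess);
    //     handlePostRenderTasks();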

    // Keeps track of the number of connections made.
    void incrementConnectionCount()
    {
        ASSERT(isMainThread());
        m_connectionCount++;
    }

    unsigned connectionCount() const { return m_connectionCount; }

    //
    // Thread Safety and Graph Locking:
    //

    void setAudioThread(Thread& thread) { m_audioThread = &thread; } // FIXME: check either not initialized or the same
    Thread* audioThread() const { return m_audioThread; }
    bool isAudioThread() const;
    // Returns true only after the audio thread has been started and then shut down.
    bool isAudioThreadFinished() { return m_isAudioThreadFinished; }

    // mustReleaseLock is set to true if the lock was acquired in this method call (so the caller must unlock()), false if it was previously acquired.
    void lock(bool& mustReleaseLock);

    // Returns true if we own the lock.
    // mustReleaseLock is set to true if the lock was acquired in this method call (so the caller must unlock()), false if it was previously acquired.
    bool tryLock(bool& mustReleaseLock);

    void unlock();
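
    // A sketch of the manual locking pattern (prefer AutoLocker below, which
    // performs exactly these steps):
    //
    //     bool mustReleaseLock;
    //     context.lock(mustReleaseLock);
    //     // ... safely inspect or mutate the rendering graph ...
    //     if (mustReleaseLock)
    //         context.unlock();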

    // Returns true if this thread owns the context's lock.
    bool isGraphOwner() const;

    // Returns the maximum number of channels we can support.
    static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }

    class AutoLocker {
    public:
        explicit AutoLocker(AudioContext& context)
            : m_context(context)
        {
            m_context.lock(m_mustReleaseLock);
        }

        ~AutoLocker()
        {
            if (m_mustReleaseLock)
                m_context.unlock();
        }

    private:
        AudioContext& m_context;
        bool m_mustReleaseLock;
    };
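
    // Typical AutoLocker usage (a sketch):
    //
    //     {
    //         AudioContext::AutoLocker locker(context);
    //         // ... graph mutations are safe here ...
    //     } // unlock() runs here only if this AutoLocker acquired the lock.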

    // AudioNode::deref() uses tryLock() before calling finishDeref(); if the lock cannot be acquired, the node is tracked here instead.
    void addDeferredFinishDeref(AudioNode*);

    // At the start of each render cycle, the audio thread calls handleDeferredFinishDerefs() to process those nodes.
    void handleDeferredFinishDerefs();

    // Only accessed when the graph lock is held.
    void markSummingJunctionDirty(AudioSummingJunction*);
    void markAudioNodeOutputDirty(AudioNodeOutput*);

    // Must be called on main thread.
    void removeMarkedSummingJunction(AudioSummingJunction*);

    // EventTarget
    EventTargetInterface eventTargetInterface() const final { return AudioContextEventTargetInterfaceType; }

    // Reconcile ref/deref which are defined both in ThreadSafeRefCounted and EventTarget.
    using ThreadSafeRefCounted::ref;
    using ThreadSafeRefCounted::deref;

    void startRendering();
    void fireCompletionEvent();

    static unsigned s_hardwareContextCount;

    // Restrictions to change default behaviors.
    enum BehaviorRestrictionFlags {
        NoRestrictions = 0,
        RequireUserGestureForAudioStartRestriction = 1 << 0,
        RequirePageConsentForAudioStartRestriction = 1 << 1,
    };
    typedef unsigned BehaviorRestrictions;

    BehaviorRestrictions behaviorRestrictions() const { return m_restrictions; }
    void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
    void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }
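
    // The flags form a bitmask, so restrictions can be added and cleared
    // independently. A sketch:
    //
    //     context.addBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
    //     // ... once a user gesture has been seen ...
    //     context.removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);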

    void isPlayingAudioDidChange();

    void nodeWillBeginPlayback();

#if !RELEASE_LOG_DISABLED
    const Logger& logger() const final { return m_logger.get(); }
    const void* logIdentifier() const final { return m_logIdentifier; }
    WTFLogChannel& logChannel() const final;
    const void* nextAudioNodeLogIdentifier() { return childLogIdentifier(++m_nextAudioNodeIdentifier); }
    const void* nextAudioParameterLogIdentifier() { return childLogIdentifier(++m_nextAudioParameterIdentifier); }
#endif

    void postTask(WTF::Function<void()>&&);
    bool isStopped() const { return m_isStopScheduled; }
    const SecurityOrigin* origin() const;
    void addConsoleMessage(MessageSource, MessageLevel, const String& message);

protected:
    explicit AudioContext(Document&);
    AudioContext(Document&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);

    static bool isSampleRateRangeGood(float sampleRate);

private:
    void constructCommon();

    void lazyInitialize();
    void uninitialize();

    bool willBeginPlayback();
    bool willPausePlayback();

    bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
    bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }

    void setState(State);

    void clear();

    void scheduleNodeDeletion();

    void mediaCanStart(Document&) override;

    // EventTarget
    ScriptExecutionContext* scriptExecutionContext() const final;

    // MediaProducer
    MediaProducer::MediaStateFlags mediaState() const override;
    void pageMutedStateDidChange() override;
    // The context itself keeps a reference to all source nodes. The source nodes then reference all nodes they're connected to.
    // In turn, these nodes reference all nodes they're connected to. All nodes are ultimately connected to the AudioDestinationNode.
    // When the context dereferences a source node, it will be deactivated from the rendering graph along with all other nodes it is
    // uniquely connected to. See the AudioNode::ref() and AudioNode::deref() methods for more details.
    void refNode(AudioNode&);
    void derefNode(AudioNode&);

    // ActiveDOMObject API.
    void stop() override;
    bool canSuspendForDocumentSuspension() const override;
    const char* activeDOMObjectName() const override;

    // When the context goes away, there might still be some sources which haven't finished playing.
    // Make sure to dereference them here.
    void derefUnfinishedSourceNodes();

    // PlatformMediaSessionClient
    PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::WebAudio; }
    PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::WebAudio; }
    PlatformMediaSession::CharacteristicsFlags characteristics() const override { return m_state == State::Running ? PlatformMediaSession::HasAudio : PlatformMediaSession::HasNothing; }
    void mayResumePlayback(bool shouldResume) override;
    void suspendPlayback() override;
    bool canReceiveRemoteControlCommands() const override { return false; }
    void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override { }
    bool supportsSeeking() const override { return false; }
    bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }
    String sourceApplicationIdentifier() const override;
    bool canProduceAudio() const final { return true; }
    bool isSuspended() const final;
    bool processingUserGestureForMedia() const final;

    void visibilityStateChanged() final;

    // EventTarget
    void refEventTarget() override { ref(); }
    void derefEventTarget() override { deref(); }

    void handleDirtyAudioSummingJunctions();
    void handleDirtyAudioNodeOutputs();

    void addReaction(State, DOMPromiseDeferred<void>&&);
    void updateAutomaticPullNodes();

#if !RELEASE_LOG_DISABLED
    const char* logClassName() const final { return "AudioContext"; }

    Ref<Logger> m_logger;
    const void* m_logIdentifier;
    uint64_t m_nextAudioNodeIdentifier { 0 };
    uint64_t m_nextAudioParameterIdentifier { 0 };
#endif

    // Only accessed in the audio thread.
    Vector<AudioNode*> m_finishedNodes;

    // We don't use RefPtr<AudioNode> here because AudioNode has a more complex ref() / deref() implementation
    // with an optional refType argument; we need to use the special refType RefTypeConnection.
    // Either accessed when the graph lock is held, or on the main thread when the audio thread has finished.
    Vector<AudioNode*> m_referencedNodes;

    // Accumulate nodes which need to be deleted here.
    // This is copied to m_nodesToDelete at the end of a render cycle in handlePostRenderTasks(), where we're assured of a stable graph
    // state which will have no references to any of the nodes in m_nodesToDelete once the context lock is released
    // (when handlePostRenderTasks() has completed).
    Vector<AudioNode*> m_nodesMarkedForDeletion;

    // These nodes will be scheduled for deletion (on the main thread) at the end of a render cycle (in the realtime thread).
    Vector<AudioNode*> m_nodesToDelete;

    bool m_isDeletionScheduled { false };
    bool m_isStopScheduled { false };
    bool m_isInitialized { false };
    bool m_isAudioThreadFinished { false };
    bool m_automaticPullNodesNeedUpdating { false };
    bool m_isOfflineContext { false };

    // Only accessed when the graph lock is held.
    HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
    HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;

    // For the sake of thread safety, we maintain a separate Vector of automatic pull nodes for rendering in m_renderingAutomaticPullNodes.
    // It will be copied from m_automaticPullNodes by updateAutomaticPullNodes() at the very start or end of the rendering quantum.
    HashSet<AudioNode*> m_automaticPullNodes;
    Vector<AudioNode*> m_renderingAutomaticPullNodes;
    // Only accessed in the audio thread.
    Vector<AudioNode*> m_deferredFinishDerefList;
    Vector<Vector<DOMPromiseDeferred<void>>> m_stateReactions;

    std::unique_ptr<PlatformMediaSession> m_mediaSession;
    std::unique_ptr<GenericEventQueue> m_eventQueue;

    RefPtr<AudioBuffer> m_renderTarget;
    RefPtr<AudioDestinationNode> m_destinationNode;
    RefPtr<AudioListener> m_listener;

    unsigned m_connectionCount { 0 };

    // Graph locking.
    Lock m_contextGraphMutex;
    // FIXME: Using volatile seems incorrect.
    // https://bugs.webkit.org/show_bug.cgi?id=180332
    Thread* volatile m_audioThread { nullptr };
    Thread* volatile m_graphOwnerThread { nullptr }; // if the lock is held then this is the thread which owns it, otherwise == nullptr.

    AsyncAudioDecoder m_audioDecoder;

    // We consider 32 channels large enough for multi-channel audio.
    // The limit is somewhat arbitrary and could be increased if necessary.
    enum { MaxNumberOfChannels = 32 };

    // Number of AudioBufferSourceNodes that are active (playing).
    std::atomic<int> m_activeSourceCount { 0 };

    BehaviorRestrictions m_restrictions { NoRestrictions };

    State m_state { State::Suspended };
};

// FIXME: Find out why these ==/!= functions are needed and remove them if possible.

inline bool operator==(const AudioContext& lhs, const AudioContext& rhs)
{
    return &lhs == &rhs;
}

inline bool operator!=(const AudioContext& lhs, const AudioContext& rhs)
{
    return &lhs != &rhs;
}

inline AudioContext::State AudioContext::state() const
{
    return m_state;
}

} // namespace WebCore