/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "AudioNode.h"

#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "AudioParam.h"
#include "Logging.h"
#include <wtf/Atomics.h>
#include <wtf/IsoMallocInlines.h>
#include <wtf/MainThread.h>

#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif

namespace WebCore {

WTF_MAKE_ISO_ALLOCATED_IMPL(AudioNode);

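// Maps an AudioNode::NodeType value to a human-readable string name.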
String convertEnumerationToString(AudioNode::NodeType enumerationValue)
{
    static const NeverDestroyed<String> values[] = {
        MAKE_STATIC_STRING_IMPL("NodeTypeUnknown"),
        MAKE_STATIC_STRING_IMPL("NodeTypeDestination"),
        MAKE_STATIC_STRING_IMPL("NodeTypeOscillator"),
        MAKE_STATIC_STRING_IMPL("NodeTypeAudioBufferSource"),
        MAKE_STATIC_STRING_IMPL("NodeTypeMediaElementAudioSource"),
        MAKE_STATIC_STRING_IMPL("NodeTypeMediaStreamAudioDestination"),
        MAKE_STATIC_STRING_IMPL("NodeTypeMediaStreamAudioSource"),
        MAKE_STATIC_STRING_IMPL("NodeTypeJavaScript"),
        MAKE_STATIC_STRING_IMPL("NodeTypeBiquadFilter"),
        MAKE_STATIC_STRING_IMPL("NodeTypePanner"),
        MAKE_STATIC_STRING_IMPL("NodeTypeConvolver"),
        MAKE_STATIC_STRING_IMPL("NodeTypeDelay"),
        MAKE_STATIC_STRING_IMPL("NodeTypeGain"),
        MAKE_STATIC_STRING_IMPL("NodeTypeChannelSplitter"),
        MAKE_STATIC_STRING_IMPL("NodeTypeChannelMerger"),
        MAKE_STATIC_STRING_IMPL("NodeTypeAnalyser"),
        MAKE_STATIC_STRING_IMPL("NodeTypeDynamicsCompressor"),
        MAKE_STATIC_STRING_IMPL("NodeTypeWaveShaper"),
        MAKE_STATIC_STRING_IMPL("NodeTypeBasicInspector"),
        MAKE_STATIC_STRING_IMPL("NodeTypeEnd"),
    };
    static_assert(static_cast<size_t>(AudioNode::NodeTypeUnknown) == 0, "AudioNode::NodeTypeUnknown is not 0 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeDestination) == 1, "AudioNode::NodeTypeDestination is not 1 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeOscillator) == 2, "AudioNode::NodeTypeOscillator is not 2 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeAudioBufferSource) == 3, "AudioNode::NodeTypeAudioBufferSource is not 3 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeMediaElementAudioSource) == 4, "AudioNode::NodeTypeMediaElementAudioSource is not 4 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeMediaStreamAudioDestination) == 5, "AudioNode::NodeTypeMediaStreamAudioDestination is not 5 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeMediaStreamAudioSource) == 6, "AudioNode::NodeTypeMediaStreamAudioSource is not 6 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeJavaScript) == 7, "AudioNode::NodeTypeJavaScript is not 7 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeBiquadFilter) == 8, "AudioNode::NodeTypeBiquadFilter is not 8 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypePanner) == 9, "AudioNode::NodeTypePanner is not 9 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeConvolver) == 10, "AudioNode::NodeTypeConvolver is not 10 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeDelay) == 11, "AudioNode::NodeTypeDelay is not 11 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeGain) == 12, "AudioNode::NodeTypeGain is not 12 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeChannelSplitter) == 13, "AudioNode::NodeTypeChannelSplitter is not 13 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeChannelMerger) == 14, "AudioNode::NodeTypeChannelMerger is not 14 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeAnalyser) == 15, "AudioNode::NodeTypeAnalyser is not 15 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeDynamicsCompressor) == 16, "AudioNode::NodeTypeDynamicsCompressor is not 16 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeWaveShaper) == 17, "AudioNode::NodeTypeWaveShaper is not 17 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeBasicInspector) == 18, "AudioNode::NodeTypeBasicInspector is not 18 as expected");
    static_assert(static_cast<size_t>(AudioNode::NodeTypeEnd) == 19, "AudioNode::NodeTypeEnd is not 19 as expected");

    ASSERT(static_cast<size_t>(enumerationValue) < WTF_ARRAY_LENGTH(values));

    return values[static_cast<size_t>(enumerationValue)];
}

AudioNode::AudioNode(AudioContext& context, float sampleRate)
    : m_isInitialized(false)
    , m_nodeType(NodeTypeUnknown)
    , m_context(context)
    , m_sampleRate(sampleRate)
    , m_lastProcessingTime(-1)
    , m_lastNonSilentTime(-1)
    , m_normalRefCount(1) // start out with normal refCount == 1 (like WTF::RefCounted class)
    , m_connectionRefCount(0)
    , m_isMarkedForDeletion(false)
    , m_isDisabled(false)
#if !RELEASE_LOG_DISABLED
    , m_logger(context.logger())
    , m_logIdentifier(context.nextAudioNodeLogIdentifier())
#endif
    , m_channelCount(2)
    , m_channelCountMode(Max)
    , m_channelInterpretation(AudioBus::Speakers)
{
    ALWAYS_LOG(LOGIDENTIFIER);

#if DEBUG_AUDIONODE_REFERENCES
    if (!s_isNodeCountInitialized) {
        s_isNodeCountInitialized = true;
        atexit(AudioNode::printNodeCounts);
    }
#endif
}

AudioNode::~AudioNode()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());
#if DEBUG_AUDIONODE_REFERENCES
    --s_nodeCount[nodeType()];
    fprintf(stderr, "%p: %d: AudioNode::~AudioNode() %d %d\n", this, nodeType(), m_normalRefCount.load(), m_connectionRefCount);
#endif
}

void AudioNode::initialize()
{
    m_isInitialized = true;
}

void AudioNode::uninitialize()
{
    m_isInitialized = false;
}

void AudioNode::setNodeType(NodeType type)
{
    ASSERT(isMainThread());
    ALWAYS_LOG(LOGIDENTIFIER, type);

    m_nodeType = type;

#if DEBUG_AUDIONODE_REFERENCES
    ++s_nodeCount[type];
#endif
}

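// Initializes the node on first use if it has not already been initialized.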
void AudioNode::lazyInitialize()
{
    if (!isInitialized())
        initialize();
}

void AudioNode::addInput(std::unique_ptr<AudioNodeInput> input)
{
    ASSERT(isMainThread());
    INFO_LOG(LOGIDENTIFIER, input->node()->nodeType());
    m_inputs.append(WTFMove(input));
}

void AudioNode::addOutput(std::unique_ptr<AudioNodeOutput> output)
{
    ASSERT(isMainThread());
    INFO_LOG(LOGIDENTIFIER, output->node()->nodeType());
    m_outputs.append(WTFMove(output));
}

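// Index-based accessors for the node's inputs and outputs; they return nullptr when the index is out of range.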
AudioNodeInput* AudioNode::input(unsigned i)
{
    if (i < m_inputs.size())
        return m_inputs[i].get();
    return nullptr;
}

AudioNodeOutput* AudioNode::output(unsigned i)
{
    if (i < m_outputs.size())
        return m_outputs[i].get();
    return nullptr;
}

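// Connects this node's output at outputIndex to the destination node's input at inputIndex.
// Out-of-range indices yield IndexSizeError; connecting nodes from different contexts yields SyntaxError.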
ExceptionOr<void> AudioNode::connect(AudioNode& destination, unsigned outputIndex, unsigned inputIndex)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    ALWAYS_LOG(LOGIDENTIFIER, destination.nodeType(), ", output = ", outputIndex, ", input = ", inputIndex);

    // Sanity check input and output indices.
    if (outputIndex >= numberOfOutputs())
        return Exception { IndexSizeError };

    if (inputIndex >= destination.numberOfInputs())
        return Exception { IndexSizeError };

    if (context() != destination.context())
        return Exception { SyntaxError };

    auto* input = destination.input(inputIndex);
    auto* output = this->output(outputIndex);
    input->connect(output);

    // Let context know that a connection has been made.
    context().incrementConnectionCount();

    return { };
}

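// Connects this node's output at outputIndex to an AudioParam, subject to the same index and
// same-context checks as the node-to-node overload.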
ExceptionOr<void> AudioNode::connect(AudioParam& param, unsigned outputIndex)
{
    AudioContext::AutoLocker locker(context());

    ASSERT(isMainThread());

    INFO_LOG(LOGIDENTIFIER, param.name(), ", output = ", outputIndex);

    if (outputIndex >= numberOfOutputs())
        return Exception { IndexSizeError };

    if (context() != param.context())
        return Exception { SyntaxError };

    auto* output = this->output(outputIndex);
    param.connect(output);

    return { };
}

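// Disconnects everything attached to this node's output at outputIndex.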
ExceptionOr<void> AudioNode::disconnect(unsigned outputIndex)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    // Sanity check input and output indices.
    if (outputIndex >= numberOfOutputs())
        return Exception { IndexSizeError };

    auto* output = this->output(outputIndex);
    INFO_LOG(LOGIDENTIFIER, output->node()->nodeType());

    output->disconnectAll();

    return { };
}

unsigned AudioNode::channelCount()
{
    return m_channelCount;
}

ExceptionOr<void> AudioNode::setChannelCount(unsigned channelCount)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    ALWAYS_LOG(LOGIDENTIFIER, channelCount);

    if (!(channelCount > 0 && channelCount <= AudioContext::maxNumberOfChannels()))
        return Exception { InvalidStateError };

    if (m_channelCount == channelCount)
        return { };

    m_channelCount = channelCount;
    if (m_channelCountMode != Max)
        updateChannelsForInputs();
    return { };
}

String AudioNode::channelCountMode()
{
    switch (m_channelCountMode) {
    case Max:
        return "max"_s;
    case ClampedMax:
        return "clamped-max"_s;
    case Explicit:
        return "explicit"_s;
    }
    ASSERT_NOT_REACHED();
    return emptyString();
}

ExceptionOr<void> AudioNode::setChannelCountMode(const String& mode)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    ALWAYS_LOG(LOGIDENTIFIER, mode);

    ChannelCountMode oldMode = m_channelCountMode;

    if (mode == "max")
        m_channelCountMode = Max;
    else if (mode == "clamped-max")
        m_channelCountMode = ClampedMax;
    else if (mode == "explicit")
        m_channelCountMode = Explicit;
    else
        return Exception { InvalidStateError };

    if (m_channelCountMode != oldMode)
        updateChannelsForInputs();

    return { };
}

String AudioNode::channelInterpretation()
{
    switch (m_channelInterpretation) {
    case AudioBus::Speakers:
        return "speakers"_s;
    case AudioBus::Discrete:
        return "discrete"_s;
    }
    ASSERT_NOT_REACHED();
    return emptyString();
}

ExceptionOr<void> AudioNode::setChannelInterpretation(const String& interpretation)
{
    ASSERT(isMainThread());
    AudioContext::AutoLocker locker(context());

    ALWAYS_LOG(LOGIDENTIFIER, interpretation);

    if (interpretation == "speakers")
        m_channelInterpretation = AudioBus::Speakers;
    else if (interpretation == "discrete")
        m_channelInterpretation = AudioBus::Discrete;
    else
        return Exception { InvalidStateError };

    return { };
}

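// Propagates a change in channel count or channel count mode to this node's inputs.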
void AudioNode::updateChannelsForInputs()
{
    for (auto& input : m_inputs)
        input->changedOutputs();
}

EventTargetInterface AudioNode::eventTargetInterface() const
{
    return AudioNodeEventTargetInterfaceType;
}

ScriptExecutionContext* AudioNode::scriptExecutionContext() const
{
    return static_cast<ActiveDOMObject&>(const_cast<AudioNode*>(this)->context()).scriptExecutionContext();
}

void AudioNode::processIfNecessary(size_t framesToProcess)
{
    ASSERT(context().isAudioThread());

    if (!isInitialized())
        return;

    // Ensure that we only process once per rendering quantum.
    // This handles the "fanout" problem where an output is connected to multiple inputs.
    // The first time we're called during this time slice we process, but after that we don't want to re-process;
    // instead our output(s) will already have the results cached in their bus.
    double currentTime = context().currentTime();
    if (m_lastProcessingTime != currentTime) {
        m_lastProcessingTime = currentTime; // important to first update this time because of feedback loops in the rendering graph

        pullInputs(framesToProcess);

        bool silentInputs = inputsAreSilent();
        if (!silentInputs)
            m_lastNonSilentTime = (context().currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);

        if (silentInputs && propagatesSilence())
            silenceOutputs();
        else
            process(framesToProcess);
    }
}

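// Called on the audio thread (with the graph lock held) when an input's channel count may have
// changed; updates that input's internal summing bus.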
void AudioNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
{
    ASSERT(context().isAudioThread() && context().isGraphOwner());

    for (auto& savedInput : m_inputs) {
        if (input == savedInput.get()) {
            input->updateInternalBus();
            return;
        }
    }

    ASSERT_NOT_REACHED();
}

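// A node may skip processing and output silence once its inputs have been silent for longer than
// its tail time plus latency time.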
bool AudioNode::propagatesSilence() const
{
    return m_lastNonSilentTime + latencyTime() + tailTime() < context().currentTime();
}

void AudioNode::pullInputs(size_t framesToProcess)
{
    ASSERT(context().isAudioThread());

    // Process all of the AudioNodes connected to our inputs.
    for (auto& input : m_inputs)
        input->pull(0, framesToProcess);
}

bool AudioNode::inputsAreSilent()
{
    for (auto& input : m_inputs) {
        if (!input->bus()->isSilent())
            return false;
    }
    return true;
}

void AudioNode::silenceOutputs()
{
    for (auto& output : m_outputs)
        output->bus()->zero();
}

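// Re-enables this node's outputs when it regains an active connection after having been disabled.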
void AudioNode::enableOutputsIfNecessary()
{
    if (m_isDisabled && m_connectionRefCount > 0) {
        ASSERT(isMainThread());
        AudioContext::AutoLocker locker(context());

        m_isDisabled = false;
        for (auto& output : m_outputs)
            output->enable();
    }
}

void AudioNode::disableOutputsIfNecessary()
{
    // Disable outputs if appropriate. We do this if the number of connections is 0 or 1. The case
    // of 0 is from finishDeref() where there are no connections left. The case of 1 is from
    // AudioNodeInput::disable() where we want to disable outputs when there's only one connection
    // left because we're ready to go away, but can't quite yet.
    if (m_connectionRefCount <= 1 && !m_isDisabled) {
        // Still may have JavaScript references, but no more "active" connection references, so put all of our outputs in a "dormant" disabled state.
        // Garbage collection may take a very long time after this time, so the "dormant" disabled nodes should not bog down the rendering...

        // As far as JavaScript is concerned, our outputs must still appear to be connected.
        // But internally our outputs should be disabled from the inputs they're connected to.
        // disable() can recursively deref connections (and call disable()) down a whole chain of connected nodes.

        // FIXME: we special case the convolver and delay since they have a significant tail-time and shouldn't be disconnected simply
        // because they no longer have any input connections. This needs to be handled more generally where AudioNodes have
        // a tailTime attribute. Then the AudioNode only needs to remain "active" for tailTime seconds after there are no
        // longer any active connections.
        if (nodeType() != NodeTypeConvolver && nodeType() != NodeTypeDelay) {
            m_isDisabled = true;
            for (auto& output : m_outputs)
                output->disable();
        }
    }
}

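// An AudioNode carries two kinds of references: RefTypeNormal for ordinary (e.g. JavaScript-visible)
// references and RefTypeConnection for references held by active connections in the rendering graph.
// Taking a new connection reference re-enables outputs that were previously disabled.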
void AudioNode::ref(RefType refType)
{
    switch (refType) {
    case RefTypeNormal:
        ++m_normalRefCount;
        break;
    case RefTypeConnection:
        ++m_connectionRefCount;
        break;
    default:
        ASSERT_NOT_REACHED();
    }

#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: %d: AudioNode::ref(%d) %d %d\n", this, nodeType(), refType, m_normalRefCount.load(), m_connectionRefCount);
#endif

    // See the disabling code in finishDeref() below. This handles the case where a node
    // is being re-connected after being used at least once and disconnected.
    // In this case, we need to re-enable.
    if (refType == RefTypeConnection)
        enableOutputsIfNecessary();
}

void AudioNode::deref(RefType refType)
{
    // The actual work for deref happens completely within the audio context's graph lock.
    // In the case of the audio thread, we must use a tryLock to avoid glitches.
    bool hasLock = false;
    bool mustReleaseLock = false;

    if (context().isAudioThread()) {
        // Real-time audio thread must not contend lock (to avoid glitches).
        hasLock = context().tryLock(mustReleaseLock);
    } else {
        context().lock(mustReleaseLock);
        hasLock = true;
    }

    if (hasLock) {
        // This is where the real deref work happens.
        finishDeref(refType);

        if (mustReleaseLock)
            context().unlock();
    } else {
        // We were unable to get the lock, so put this in a list to finish up later.
        ASSERT(context().isAudioThread());
        ASSERT(refType == RefTypeConnection);
        context().addDeferredFinishDeref(this);
    }

    // Once AudioContext::uninitialize() is called there are no more chances for deleteMarkedNodes() to get called, so we call it here.
    // We can't call it in AudioContext::~AudioContext(), since that will never run as long as any AudioNode is alive,
    // because AudioNodes keep a reference to the context.
    if (context().isAudioThreadFinished())
        context().deleteMarkedNodes();
}

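// Performs the actual reference-count decrement. Must be called while holding the context's graph
// lock. When both reference counts reach zero, the node disconnects its outputs and marks itself
// for deletion; when only the connection count reaches zero, its outputs may be disabled.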
void AudioNode::finishDeref(RefType refType)
{
    ASSERT(context().isGraphOwner());

    switch (refType) {
    case RefTypeNormal:
        ASSERT(m_normalRefCount > 0);
        --m_normalRefCount;
        break;
    case RefTypeConnection:
        ASSERT(m_connectionRefCount > 0);
        --m_connectionRefCount;
        break;
    default:
        ASSERT_NOT_REACHED();
    }

#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: %d: AudioNode::deref(%d) %d %d\n", this, nodeType(), refType, m_normalRefCount.load(), m_connectionRefCount);
#endif

    if (!m_connectionRefCount) {
        if (!m_normalRefCount) {
            if (!m_isMarkedForDeletion) {
                // All references are gone - we need to go away.
                for (auto& output : m_outputs)
                    output->disconnectAll(); // This will deref() nodes we're connected to.

                // Mark for deletion at end of each render quantum or when context shuts down.
                context().markForDeletion(*this);
                m_isMarkedForDeletion = true;
            }
        } else if (refType == RefTypeConnection)
            disableOutputsIfNecessary();
    }
}

#if DEBUG_AUDIONODE_REFERENCES

bool AudioNode::s_isNodeCountInitialized = false;
int AudioNode::s_nodeCount[NodeTypeEnd];

void AudioNode::printNodeCounts()
{
    fprintf(stderr, "\n\n");
    fprintf(stderr, "===========================\n");
    fprintf(stderr, "AudioNode: reference counts\n");
    fprintf(stderr, "===========================\n");

    for (unsigned i = 0; i < NodeTypeEnd; ++i)
        fprintf(stderr, "%d: %d\n", i, s_nodeCount[i]);

    fprintf(stderr, "===========================\n\n\n");
}

#endif // DEBUG_AUDIONODE_REFERENCES

#if !RELEASE_LOG_DISABLED
WTFLogChannel& AudioNode::logChannel() const
{
    return LogMedia;
}
#endif

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)