/*
 * Copyright (C) 2011 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "AudioParamTimeline.h"

#include "AudioUtilities.h"
#include "FloatConversion.h"
#include <algorithm>
#include <wtf/MathExtras.h>

namespace WebCore {

void AudioParamTimeline::setValueAtTime(float value, float time)
{
    insertEvent(ParamEvent(ParamEvent::SetValue, value, time, 0, 0, 0));
}

void AudioParamTimeline::linearRampToValueAtTime(float value, float time)
{
    insertEvent(ParamEvent(ParamEvent::LinearRampToValue, value, time, 0, 0, 0));
}

void AudioParamTimeline::exponentialRampToValueAtTime(float value, float time)
{
    insertEvent(ParamEvent(ParamEvent::ExponentialRampToValue, value, time, 0, 0, 0));
}

void AudioParamTimeline::setTargetAtTime(float target, float time, float timeConstant)
{
    insertEvent(ParamEvent(ParamEvent::SetTarget, target, time, timeConstant, 0, 0));
}

void AudioParamTimeline::setValueCurveAtTime(Float32Array* curve, float time, float duration)
{
    insertEvent(ParamEvent(ParamEvent::SetValueCurve, 0, time, 0, duration, curve));
}
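
// Usage sketch: an AudioParam that owns a timeline would typically schedule automation
// through the methods above, and the audio thread later samples the resulting curve via
// valuesForTimeRange(). For example:
//
//     timeline.setValueAtTime(0.0f, 0.0f);
//     timeline.linearRampToValueAtTime(1.0f, 0.5f); // ramp up over the first half second
//     timeline.setTargetAtTime(0.0f, 1.0f, 0.1f); // then decay toward zero with a 100ms time constant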

static bool isValidNumber(float x)
{
    return !std::isnan(x) && !std::isinf(x);
}

void AudioParamTimeline::insertEvent(const ParamEvent& event)
{
    // Sanity check the event. Be super careful we're not getting infected with NaN or Inf.
    bool isValid = event.type() < ParamEvent::LastType
        && isValidNumber(event.value())
        && isValidNumber(event.time())
        && isValidNumber(event.timeConstant())
        && isValidNumber(event.duration())
        && event.duration() >= 0;

    ASSERT(isValid);
    if (!isValid)
        return;

    std::lock_guard<Lock> lock(m_eventsMutex);

    unsigned i = 0;
    float insertTime = event.time();
    for (auto& paramEvent : m_events) {
        // Overwrite an existing event if it has the same type and time.
        if (paramEvent.time() == insertTime && paramEvent.type() == event.type()) {
            paramEvent = event;
            return;
        }

        if (paramEvent.time() > insertTime)
            break;

        ++i;
    }

    m_events.insert(i, event);
}

void AudioParamTimeline::cancelScheduledValues(float startTime)
{
    std::lock_guard<Lock> lock(m_eventsMutex);

    // Remove all events whose time is at or after startTime.
    for (unsigned i = 0; i < m_events.size(); ++i) {
        if (m_events[i].time() >= startTime) {
            m_events.remove(i, m_events.size() - i);
            break;
        }
    }
}

float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
{
    {
        std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
        if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
            hasValue = false;
            return defaultValue;
        }
    }

    // Ask for just a single value.
    float value;
    double sampleRate = context.sampleRate();
    double startTime = context.currentTime();
    double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
    double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
    value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);

    hasValue = true;
    return value;
}

float AudioParamTimeline::valuesForTimeRange(double startTime, double endTime, float defaultValue, float* values, unsigned numberOfValues, double sampleRate, double controlRate)
{
    // The realtime audio thread can't block on this lock, so return the default value if the lock is contended.
    std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
    if (!lock.owns_lock()) {
        if (values) {
            for (unsigned i = 0; i < numberOfValues; ++i)
                values[i] = defaultValue;
        }
        return defaultValue;
    }

    float value = valuesForTimeRangeImpl(startTime, endTime, defaultValue, values, numberOfValues, sampleRate, controlRate);

    return value;
}

float AudioParamTimeline::valuesForTimeRangeImpl(double startTime, double endTime, float defaultValue, float* values, unsigned numberOfValues, double sampleRate, double controlRate)
{
    ASSERT(values);
    if (!values)
        return defaultValue;

    // Return default value if there are no events matching the desired time range.
    if (!m_events.size() || endTime <= m_events[0].time()) {
        for (unsigned i = 0; i < numberOfValues; ++i)
            values[i] = defaultValue;
        return defaultValue;
    }

    // Maintain a running time and index for writing the values buffer.
    double currentTime = startTime;
    unsigned writeIndex = 0;

    // If the first event is after startTime, fill the initial part of the values buffer
    // with defaultValue until we reach the first event time.
    double firstEventTime = m_events[0].time();
    if (firstEventTime > startTime) {
        double fillToTime = std::min(endTime, firstEventTime);
        unsigned fillToFrame = AudioUtilities::timeToSampleFrame(fillToTime - startTime, sampleRate);
        fillToFrame = std::min(fillToFrame, numberOfValues);
        for (; writeIndex < fillToFrame; ++writeIndex)
            values[writeIndex] = defaultValue;

        currentTime = fillToTime;
    }

    float value = defaultValue;

    // Go through each event and render the value buffer where the times overlap,
    // stopping when we've rendered all the requested values.
    // FIXME: could try to optimize by avoiding having to iterate starting from the very first event
    // and keeping track of a "current" event index.
    int n = m_events.size();
    for (int i = 0; i < n && writeIndex < numberOfValues; ++i) {
        ParamEvent& event = m_events[i];
        ParamEvent* nextEvent = i < n - 1 ? &(m_events[i + 1]) : 0;

        // If even the next event is already in the past, this event has been superseded, so skip ahead.
        if (nextEvent && nextEvent->time() < currentTime)
            continue;

        float value1 = event.value();
        double time1 = event.time();
        float value2 = nextEvent ? nextEvent->value() : value1;
        double time2 = nextEvent ? nextEvent->time() : endTime + 1;

        double deltaTime = time2 - time1;
        float k = deltaTime > 0 ? 1 / deltaTime : 0;
        double sampleFrameTimeIncr = 1 / sampleRate;

        double fillToTime = std::min(endTime, time2);
        unsigned fillToFrame = AudioUtilities::timeToSampleFrame(fillToTime - startTime, sampleRate);
        fillToFrame = std::min(fillToFrame, numberOfValues);

        ParamEvent::Type nextEventType = nextEvent ? static_cast<ParamEvent::Type>(nextEvent->type()) : ParamEvent::LastType /* unknown */;

        // First handle linear and exponential ramps, which require looking ahead to the next event.
        if (nextEventType == ParamEvent::LinearRampToValue) {
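            // Linearly interpolate between value1 (at time1) and value2 (at time2);
            // x is the position of currentTime within [time1, time2], normalized to [0, 1].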
            for (; writeIndex < fillToFrame; ++writeIndex) {
                float x = (currentTime - time1) * k;
                value = (1 - x) * value1 + x * value2;
                values[writeIndex] = value;
                currentTime += sampleFrameTimeIncr;
            }
        } else if (nextEventType == ParamEvent::ExponentialRampToValue) {
            if (value1 <= 0 || value2 <= 0) {
                // This implementation requires strictly positive endpoint values for exponential ramps;
                // handle the error case by propagating the previous value.
                for (; writeIndex < fillToFrame; ++writeIndex)
                    values[writeIndex] = value;
            } else {
                float numSampleFrames = deltaTime * sampleRate;
                // The value goes exponentially from value1 to value2 in a duration of deltaTime seconds (corresponding to numSampleFrames).
                // Compute the per-sample multiplier.
                float multiplier = powf(value2 / value1, 1 / numSampleFrames);
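                // multiplier is chosen so that value1 * multiplier^numSampleFrames == value2.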

                // Set the starting value of the exponential ramp. This is the same as
                // value1 * multiplier ^ AudioUtilities::timeToSampleFrame(currentTime - time1, sampleRate),
                // but is more accurate, especially if multiplier is close to 1.
                value = value1 * powf(value2 / value1,
                    AudioUtilities::timeToSampleFrame(currentTime - time1, sampleRate) / numSampleFrames);

                for (; writeIndex < fillToFrame; ++writeIndex) {
                    values[writeIndex] = value;
                    value *= multiplier;
                    currentTime += sampleFrameTimeIncr;
                }
            }
        } else {
            // Handle event types not requiring looking ahead to the next event.
            switch (event.type()) {
            case ParamEvent::SetValue:
            case ParamEvent::LinearRampToValue:
            case ParamEvent::ExponentialRampToValue:
                {
                    currentTime = fillToTime;

                    // Simply stay at a constant value.
                    value = event.value();
                    for (; writeIndex < fillToFrame; ++writeIndex)
                        values[writeIndex] = value;

                    break;
                }

            case ParamEvent::SetTarget:
                {
                    currentTime = fillToTime;

                    // Exponential approach to target value with given time constant.
                    float target = event.value();
                    float timeConstant = event.timeConstant();
                    float discreteTimeConstant = static_cast<float>(AudioUtilities::discreteTimeConstantForSampleRate(timeConstant, controlRate));
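
                    // The loop below approximates the continuous exponential approach
                    // v(t) = target + (v(t0) - target) * exp(-(t - t0) / timeConstant):
                    // discreteTimeConstant is the fraction of the remaining distance to the
                    // target that is covered in one step at controlRate.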
                    for (; writeIndex < fillToFrame; ++writeIndex) {
                        values[writeIndex] = value;
                        value += (target - value) * discreteTimeConstant;
                    }

                    break;
                }

            case ParamEvent::SetValueCurve:
                {
                    Float32Array* curve = event.curve();
                    float* curveData = curve ? curve->data() : 0;
                    unsigned numberOfCurvePoints = curve ? curve->length() : 0;

                    // Curve events have duration, so don't just use next event time.
                    float duration = event.duration();
                    float durationFrames = duration * sampleRate;
                    float curvePointsPerFrame = static_cast<float>(numberOfCurvePoints) / durationFrames;
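                    // Each output sample frame advances the curve read position by curvePointsPerFrame,
                    // so the full curve is stretched to cover exactly durationFrames frames.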

                    if (!curve || !curveData || !numberOfCurvePoints || duration <= 0 || sampleRate <= 0) {
                        // Error condition - simply propagate previous value.
                        currentTime = fillToTime;
                        for (; writeIndex < fillToFrame; ++writeIndex)
                            values[writeIndex] = value;
                        break;
                    }

                    // Save old values and recalculate information based on the curve's duration
                    // instead of the next event time.
                    unsigned nextEventFillToFrame = fillToFrame;
                    float nextEventFillToTime = fillToTime;
                    fillToTime = std::min(endTime, time1 + duration);
                    fillToFrame = AudioUtilities::timeToSampleFrame(fillToTime - startTime, sampleRate);
                    fillToFrame = std::min(fillToFrame, numberOfValues);

                    // Index into the curve data using a floating-point value.
                    // We're scaling the number of curve points by the duration (see curvePointsPerFrame).
                    float curveVirtualIndex = 0;
                    if (time1 < currentTime) {
                        // Index somewhere in the middle of the curve data.
                        // Don't use timeToSampleFrame() since we want the exact floating-point frame.
                        float frameOffset = (currentTime - time1) * sampleRate;
                        curveVirtualIndex = curvePointsPerFrame * frameOffset;
                    }

                    // Render the stretched curve data using nearest neighbor sampling.
                    // Oversampled curve data can be provided if smoothness is desired.
                    for (; writeIndex < fillToFrame; ++writeIndex) {
                        // Ideally we'd use round() from MathExtras, but we're in a tight loop here
                        // and we're trading off precision for extra speed.
                        unsigned curveIndex = static_cast<unsigned>(0.5 + curveVirtualIndex);

                        curveVirtualIndex += curvePointsPerFrame;

                        // Bounds check.
                        if (curveIndex < numberOfCurvePoints)
                            value = curveData[curveIndex];

                        values[writeIndex] = value;
                    }

                    // If there's any time left between the end of this curve's duration and the
                    // start of the next event, just propagate the last curve value.
                    for (; writeIndex < nextEventFillToFrame; ++writeIndex)
                        values[writeIndex] = value;

                    // Re-adjust current time
                    currentTime = nextEventFillToTime;

                    break;
                }
            }
        }
    }

    // If there's any time left after processing the last event then just propagate the last value
    // to the end of the values buffer.
    for (; writeIndex < numberOfValues; ++writeIndex)
        values[writeIndex] = value;

    return value;
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)