/*
 * Copyright (C) 2011, 2012 Igalia S.L
 * Copyright (C) 2011 Zan Dobersek <zandobersek@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "AudioFileReader.h"

#include "AudioBus.h"
#include "GRefPtrGStreamer.h"
#include <gio/gio.h>
#include <gst/app/gstappsink.h>
#include <gst/audio/audio-info.h>
#include <gst/gst.h>
#include <wtf/MainThread.h>
#include <wtf/Noncopyable.h>
#include <wtf/RunLoop.h>
#include <wtf/Threading.h>
#include <wtf/WeakPtr.h>
#include <wtf/glib/GRefPtr.h>
#include <wtf/glib/GUniquePtr.h>

namespace WebCore {

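// Decodes an audio file, supplied either as a filesystem path or as an
// in-memory buffer, into planar float channels using a GStreamer pipeline.
// createBus() spins the current thread's RunLoop until decoding finishes or
// fails, which is why the public entry points below run each reader on a
// dedicated thread.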
class AudioFileReader : public CanMakeWeakPtr<AudioFileReader> {
    WTF_MAKE_NONCOPYABLE(AudioFileReader);
public:
    AudioFileReader(const char* filePath);
    AudioFileReader(const void* data, size_t dataSize);
    ~AudioFileReader();

    RefPtr<AudioBus> createBus(float sampleRate, bool mixToMono);

private:
    static void deinterleavePadAddedCallback(AudioFileReader*, GstPad*);
    static void deinterleaveReadyCallback(AudioFileReader*);
    static void decodebinPadAddedCallback(AudioFileReader*, GstPad*);

    void handleMessage(GstMessage*);
    void handleNewDeinterleavePad(GstPad*);
    void deinterleavePadsConfigured();
    void plugDeinterleave(GstPad*);
    void decodeAudioForBusCreation();
    GstFlowReturn handleSample(GstAppSink*);

    RunLoop& m_runLoop;
    const void* m_data { nullptr };
    size_t m_dataSize { 0 };
    const char* m_filePath { nullptr };

    float m_sampleRate { 0 };
    int m_channels { 0 };
    GRefPtr<GstBufferList> m_frontLeftBuffers;
    GRefPtr<GstBufferList> m_frontRightBuffers;

    GRefPtr<GstElement> m_pipeline;
    unsigned m_channelSize { 0 };
    GRefPtr<GstElement> m_decodebin;
    GRefPtr<GstElement> m_deInterleave;
    bool m_errorOccurred { false };
};

static void copyGstreamerBuffersToAudioChannel(GstBufferList* buffers, AudioChannel* audioChannel)
{
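    // Copy the decoded buffers for one channel back-to-back into the
    // AudioChannel's storage. The bus was sized from the frame count
    // accumulated in handleSample(), so these writes stay within bounds.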
    float* destination = audioChannel->mutableData();
    unsigned bufferCount = gst_buffer_list_length(buffers);
    for (unsigned i = 0; i < bufferCount; ++i) {
        GstBuffer* buffer = gst_buffer_list_get(buffers, i);
        ASSERT(buffer);
        gsize bufferSize = gst_buffer_get_size(buffer);
        gst_buffer_extract(buffer, 0, destination, bufferSize);
        destination += bufferSize / sizeof(float);
    }
}

void AudioFileReader::deinterleavePadAddedCallback(AudioFileReader* reader, GstPad* pad)
{
    reader->handleNewDeinterleavePad(pad);
}

void AudioFileReader::deinterleaveReadyCallback(AudioFileReader* reader)
{
    reader->deinterleavePadsConfigured();
}

void AudioFileReader::decodebinPadAddedCallback(AudioFileReader* reader, GstPad* pad)
{
    reader->plugDeinterleave(pad);
}

AudioFileReader::AudioFileReader(const char* filePath)
    : m_runLoop(RunLoop::current())
    , m_filePath(filePath)
{
}

AudioFileReader::AudioFileReader(const void* data, size_t dataSize)
    : m_runLoop(RunLoop::current())
    , m_data(data)
    , m_dataSize(dataSize)
{
}

AudioFileReader::~AudioFileReader()
{
    if (m_pipeline) {
        GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get())));
        ASSERT(bus);
        gst_bus_set_sync_handler(bus.get(), nullptr, nullptr, nullptr);

        gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
        m_pipeline = nullptr;
    }

    if (m_decodebin) {
        g_signal_handlers_disconnect_matched(m_decodebin.get(), G_SIGNAL_MATCH_DATA, 0, 0, nullptr, nullptr, this);
        m_decodebin = nullptr;
    }

    if (m_deInterleave) {
        g_signal_handlers_disconnect_matched(m_deInterleave.get(), G_SIGNAL_MATCH_DATA, 0, 0, nullptr, nullptr, this);
        m_deInterleave = nullptr;
    }
}

GstFlowReturn AudioFileReader::handleSample(GstAppSink* sink)
{
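    // Called on an appsink streaming thread. We only append to the buffer
    // lists here; they are consumed on the reader's thread once the RunLoop
    // has stopped, so no extra locking is needed.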
    GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink));
    if (!sample)
        return GST_FLOW_ERROR;

    GstBuffer* buffer = gst_sample_get_buffer(sample.get());
    if (!buffer)
        return GST_FLOW_ERROR;

    GstCaps* caps = gst_sample_get_caps(sample.get());
    if (!caps)
        return GST_FLOW_ERROR;

    GstAudioInfo info;
    // Reject samples whose caps cannot be parsed as raw audio; dividing by a
    // zero bytes-per-frame value would crash.
    if (!gst_audio_info_from_caps(&info, caps) || !info.bpf)
        return GST_FLOW_ERROR;
    int frames = gst_buffer_get_size(buffer) / info.bpf;

    // Check the first audio channel. The buffer is expected to contain data
    // for a single channel only anyway.
    switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
    case GST_AUDIO_CHANNEL_POSITION_MONO:
        gst_buffer_list_add(m_frontLeftBuffers.get(), gst_buffer_ref(buffer));
        m_channelSize += frames;
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_buffer_list_add(m_frontRightBuffers.get(), gst_buffer_ref(buffer));
        break;
    default:
        break;
    }

    return GST_FLOW_OK;
}

void AudioFileReader::handleMessage(GstMessage* message)
{
    ASSERT(&m_runLoop == &RunLoop::current());

    GUniqueOutPtr<GError> error;
    GUniqueOutPtr<gchar> debug;

    switch (GST_MESSAGE_TYPE(message)) {
    case GST_MESSAGE_EOS:
        m_runLoop.stop();
        break;
    case GST_MESSAGE_WARNING:
        gst_message_parse_warning(message, &error.outPtr(), &debug.outPtr());
        g_warning("Warning: %d, %s. Debug output: %s", error->code, error->message, debug.get());
        break;
    case GST_MESSAGE_ERROR:
        gst_message_parse_error(message, &error.outPtr(), &debug.outPtr());
        g_warning("Error: %d, %s. Debug output: %s", error->code, error->message, debug.get());
        m_errorOccurred = true;
        gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
        m_runLoop.stop();
        break;
    default:
        break;
    }
}

void AudioFileReader::handleNewDeinterleavePad(GstPad* pad)
{
    // A new pad for a planar channel was added in deinterleave. Plug in an
    // appsink so we can pull the data from each channel. The pipeline looks
    // like:
    // ... deinterleave ! queue ! appsink.
    GstElement* queue = gst_element_factory_make("queue", nullptr);
    GstElement* sink = gst_element_factory_make("appsink", nullptr);

    static GstAppSinkCallbacks callbacks = {
        nullptr, // eos
        nullptr, // new_preroll
        // new_sample
        [](GstAppSink* sink, gpointer userData) -> GstFlowReturn {
            return static_cast<AudioFileReader*>(userData)->handleSample(sink);
        },
        { nullptr }
    };
    gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, nullptr);

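    // Pull samples as fast as possible; the sink does not need to sync
    // against a clock.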
    g_object_set(sink, "sync", FALSE, nullptr);

    gst_bin_add_many(GST_BIN(m_pipeline.get()), queue, sink, nullptr);

    GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
    gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);

    gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);

    gst_element_sync_state_with_parent(queue);
    gst_element_sync_state_with_parent(sink);
}

void AudioFileReader::deinterleavePadsConfigured()
{
    // All deinterleave src pads are now available; move the pipeline to
    // PLAYING so data flows towards the sinks and can be retrieved.
    gst_element_set_state(m_pipeline.get(), GST_STATE_PLAYING);
}

void AudioFileReader::plugDeinterleave(GstPad* pad)
{
    // Ignore any additional source pads just in case.
    if (m_deInterleave)
        return;

    // A decodebin pad was added; plug in a deinterleave element to
    // separate each planar channel. The sub-pipeline looks like:
    // ... decodebin ! audioconvert ! audioresample ! capsfilter ! deinterleave.
    GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
    GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
    GstElement* capsFilter = gst_element_factory_make("capsfilter", nullptr);
    m_deInterleave = gst_element_factory_make("deinterleave", "deinterleave");

    g_object_set(m_deInterleave.get(), "keep-positions", TRUE, nullptr);
    g_signal_connect_swapped(m_deInterleave.get(), "pad-added", G_CALLBACK(deinterleavePadAddedCallback), this);
    g_signal_connect_swapped(m_deInterleave.get(), "no-more-pads", G_CALLBACK(deinterleaveReadyCallback), this);

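    // Constrain the decoded stream to interleaved float at the target rate
    // and channel count, so that deinterleave emits one mono float stream
    // per pad.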
    GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw",
        "rate", G_TYPE_INT, static_cast<int>(m_sampleRate),
        "channels", G_TYPE_INT, m_channels,
        "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
        "layout", G_TYPE_STRING, "interleaved", nullptr));
    g_object_set(capsFilter, "caps", caps.get(), nullptr);

    gst_bin_add_many(GST_BIN(m_pipeline.get()), audioConvert, audioResample, capsFilter, m_deInterleave.get(), nullptr);

    GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink"));
    gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);

    gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(capsFilter, "src", m_deInterleave.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);

    gst_element_sync_state_with_parent(audioConvert);
    gst_element_sync_state_with_parent(audioResample);
    gst_element_sync_state_with_parent(capsFilter);
    gst_element_sync_state_with_parent(m_deInterleave.get());
}

void AudioFileReader::decodeAudioForBusCreation()
{
    ASSERT(&m_runLoop == &RunLoop::current());

    // Build the pipeline: (giostreamsrc | filesrc) ! decodebin.
    // A deinterleave element is added once a src pad becomes available in decodebin.
    m_pipeline = gst_pipeline_new(nullptr);

    GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get())));
    ASSERT(bus);
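    // Bus messages arrive on GStreamer streaming threads. Handle them in
    // place when we are already on the reader's RunLoop, otherwise bounce
    // them over to it; the WeakPtr guards against the reader being destroyed
    // before the dispatched block runs.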
    gst_bus_set_sync_handler(bus.get(), [](GstBus*, GstMessage* message, gpointer userData) {
        auto& reader = *static_cast<AudioFileReader*>(userData);
        if (&reader.m_runLoop == &RunLoop::current())
            reader.handleMessage(message);
        else {
            GRefPtr<GstMessage> protectMessage(message);
            auto weakThis = makeWeakPtr(reader);
            reader.m_runLoop.dispatch([weakThis, protectMessage] {
                if (weakThis)
                    weakThis->handleMessage(protectMessage.get());
            });
        }
        gst_message_unref(message);
        return GST_BUS_DROP;
    }, this, nullptr);

    GstElement* source;
    if (m_data) {
        ASSERT(m_dataSize);
        source = gst_element_factory_make("giostreamsrc", nullptr);
        GRefPtr<GInputStream> memoryStream = adoptGRef(g_memory_input_stream_new_from_data(m_data, m_dataSize, nullptr));
        g_object_set(source, "stream", memoryStream.get(), nullptr);
    } else {
        source = gst_element_factory_make("filesrc", nullptr);
        g_object_set(source, "location", m_filePath, nullptr);
    }

    m_decodebin = gst_element_factory_make("decodebin", "decodebin");
    g_signal_connect_swapped(m_decodebin.get(), "pad-added", G_CALLBACK(decodebinPadAddedCallback), this);

    gst_bin_add_many(GST_BIN(m_pipeline.get()), source, m_decodebin.get(), nullptr);
    gst_element_link_pads_full(source, "src", m_decodebin.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);

    // Catch errors here immediately; there might not be an error message if
    // we're unlucky.
    if (gst_element_set_state(m_pipeline.get(), GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
        g_warning("Error: Failed to set pipeline to PAUSED");
        m_errorOccurred = true;
        m_runLoop.stop();
    }
}

RefPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
    m_sampleRate = sampleRate;
    m_channels = mixToMono ? 1 : 2;

    m_frontLeftBuffers = adoptGRef(gst_buffer_list_new());
    m_frontRightBuffers = adoptGRef(gst_buffer_list_new());

    // Start the pipeline processing just after the loop is started; run()
    // then blocks until handleMessage() stops the loop on EOS or error.
    m_runLoop.dispatch([this] { decodeAudioForBusCreation(); });
    m_runLoop.run();

    // Set the pipeline to GST_STATE_NULL as soon as possible to release any
    // resources that might still be in use.
    gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);

    if (m_errorOccurred)
        return nullptr;

    auto audioBus = AudioBus::create(m_channels, m_channelSize, true);
    audioBus->setSampleRate(m_sampleRate);

    copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers.get(), audioBus->channel(0));
    if (!mixToMono)
        copyGstreamerBuffersToAudioChannel(m_frontRightBuffers.get(), audioBus->channel(1));

    return audioBus;
}

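// Public entry points used by the cross-platform AudioBus code. A minimal
// usage sketch (the file path is hypothetical):
//   RefPtr<AudioBus> bus = createBusFromAudioFile("/tmp/click.wav", false, 44100);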
RefPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate)
{
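    // createBus() runs a nested RunLoop until decoding completes; doing that
    // on a dedicated thread avoids re-entering the caller's own run loop.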
    RefPtr<AudioBus> returnValue;
    auto thread = Thread::create("AudioFileReader", [&returnValue, filePath, mixToMono, sampleRate] {
        returnValue = AudioFileReader(filePath).createBus(sampleRate, mixToMono);
    });
    thread->waitForCompletion();
    return returnValue;
}

RefPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
    RefPtr<AudioBus> returnValue;
    auto thread = Thread::create("AudioFileReader", [&returnValue, data, dataSize, mixToMono, sampleRate] {
        returnValue = AudioFileReader(data, dataSize).createBus(sampleRate, mixToMono);
    });
    thread->waitForCompletion();
    return returnValue;
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)