| 1 | /* |
| 2 | * Copyright (C) 2017-2018 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions |
| 6 | * are met: |
| 7 | * 1. Redistributions of source code must retain the above copyright |
| 8 | * notice, this list of conditions and the following disclaimer. |
| 9 | * 2. Redistributions in binary form must reproduce the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer in the |
| 11 | * documentation and/or other materials provided with the distribution. |
| 12 | * |
| 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY |
| 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 15 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| 16 | * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
| 17 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 18 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 19 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON |
| 20 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 21 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 22 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 23 | */ |
| 24 | |
| 25 | #include "config.h" |
| 26 | #include "LibWebRTCMediaEndpoint.h" |
| 27 | |
| 28 | #if USE(LIBWEBRTC) |
| 29 | |
| 30 | #include "EventNames.h" |
| 31 | #include "JSRTCStatsReport.h" |
| 32 | #include "LibWebRTCDataChannelHandler.h" |
| 33 | #include "LibWebRTCPeerConnectionBackend.h" |
| 34 | #include "LibWebRTCProvider.h" |
| 35 | #include "LibWebRTCRtpReceiverBackend.h" |
| 36 | #include "LibWebRTCRtpSenderBackend.h" |
| 37 | #include "LibWebRTCRtpTransceiverBackend.h" |
| 38 | #include "LibWebRTCStatsCollector.h" |
| 39 | #include "LibWebRTCUtils.h" |
| 40 | #include "Logging.h" |
| 41 | #include "NotImplemented.h" |
| 42 | #include "Performance.h" |
| 43 | #include "PlatformStrategies.h" |
| 44 | #include "RTCDataChannel.h" |
| 45 | #include "RTCDataChannelEvent.h" |
| 46 | #include "RTCOfferOptions.h" |
| 47 | #include "RTCPeerConnection.h" |
| 48 | #include "RTCSessionDescription.h" |
| 49 | #include "RTCStatsReport.h" |
| 50 | #include "RealtimeIncomingAudioSource.h" |
| 51 | #include "RealtimeIncomingVideoSource.h" |
| 52 | #include "RealtimeOutgoingAudioSource.h" |
| 53 | #include "RealtimeOutgoingVideoSource.h" |
| 54 | #include "RuntimeEnabledFeatures.h" |
| 55 | #include <webrtc/rtc_base/physicalsocketserver.h> |
| 56 | #include <webrtc/p2p/base/basicpacketsocketfactory.h> |
| 57 | #include <webrtc/p2p/client/basicportallocator.h> |
| 58 | #include <webrtc/pc/peerconnectionfactory.h> |
| 59 | #include <webrtc/system_wrappers/include/field_trial.h> |
| 60 | #include <wtf/MainThread.h> |
| 61 | |
| 62 | namespace WebCore { |
| 63 | |
LibWebRTCMediaEndpoint::LibWebRTCMediaEndpoint(LibWebRTCPeerConnectionBackend& peerConnection, LibWebRTCProvider& client)
    : m_peerConnectionBackend(peerConnection)
    , m_peerConnectionFactory(*client.factory())
    , m_createSessionDescriptionObserver(*this)
    , m_setLocalSessionDescriptionObserver(*this)
    , m_setRemoteSessionDescriptionObserver(*this)
    , m_statsLogTimer(*this, &LibWebRTCMediaEndpoint::gatherStatsForLogging)
#if !RELEASE_LOG_DISABLED
    , m_logger(peerConnection.logger())
    , m_logIdentifier(peerConnection.logIdentifier())
#endif
{
    ASSERT(isMainThread());
    ASSERT(client.factory());

    // Enable the H264 simulcast field trial when the runtime flag is on.
    // NOTE(review): field trials are process-wide in libwebrtc — presumably this
    // is intended to run before any peer connection is used; confirm with callers.
    if (RuntimeEnabledFeatures::sharedFeatures().webRTCH264SimulcastEnabled())
        webrtc::field_trial::InitFieldTrialsFromString("WebRTC-H264Simulcast/Enabled/");
}
| 82 | |
| 83 | bool LibWebRTCMediaEndpoint::setConfiguration(LibWebRTCProvider& client, webrtc::PeerConnectionInterface::RTCConfiguration&& configuration) |
| 84 | { |
| 85 | if (RuntimeEnabledFeatures::sharedFeatures().webRTCUnifiedPlanEnabled()) |
| 86 | configuration.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan; |
| 87 | |
| 88 | if (!m_backend) { |
| 89 | m_backend = client.createPeerConnection(*this, WTFMove(configuration)); |
| 90 | return !!m_backend; |
| 91 | } |
| 92 | auto oldConfiguration = m_backend->GetConfiguration(); |
| 93 | configuration.certificates = oldConfiguration.certificates; |
| 94 | return m_backend->SetConfiguration(WTFMove(configuration)); |
| 95 | } |
| 96 | |
| 97 | static inline const char* sessionDescriptionType(RTCSdpType sdpType) |
| 98 | { |
| 99 | switch (sdpType) { |
| 100 | case RTCSdpType::Offer: |
| 101 | return "offer" ; |
| 102 | case RTCSdpType::Pranswer: |
| 103 | return "pranswer" ; |
| 104 | case RTCSdpType::Answer: |
| 105 | return "answer" ; |
| 106 | case RTCSdpType::Rollback: |
| 107 | return "rollback" ; |
| 108 | } |
| 109 | |
| 110 | ASSERT_NOT_REACHED(); |
| 111 | return "" ; |
| 112 | } |
| 113 | |
| 114 | static inline RTCSdpType fromSessionDescriptionType(const webrtc::SessionDescriptionInterface& description) |
| 115 | { |
| 116 | auto type = description.type(); |
| 117 | if (type == webrtc::SessionDescriptionInterface::kOffer) |
| 118 | return RTCSdpType::Offer; |
| 119 | if (type == webrtc::SessionDescriptionInterface::kAnswer) |
| 120 | return RTCSdpType::Answer; |
| 121 | ASSERT(type == webrtc::SessionDescriptionInterface::kPrAnswer); |
| 122 | return RTCSdpType::Pranswer; |
| 123 | } |
| 124 | |
| 125 | static inline RefPtr<RTCSessionDescription> fromSessionDescription(const webrtc::SessionDescriptionInterface* description) |
| 126 | { |
| 127 | if (!description) |
| 128 | return nullptr; |
| 129 | |
| 130 | std::string sdp; |
| 131 | description->ToString(&sdp); |
| 132 | |
| 133 | return RTCSessionDescription::create(fromSessionDescriptionType(*description), fromStdString(sdp)); |
| 134 | } |
| 135 | |
| 136 | // FIXME: We might want to create a new object only if the session actually changed for all description getters. |
| 137 | RefPtr<RTCSessionDescription> LibWebRTCMediaEndpoint::currentLocalDescription() const |
| 138 | { |
| 139 | return m_backend ? fromSessionDescription(m_backend->current_local_description()) : nullptr; |
| 140 | } |
| 141 | |
| 142 | RefPtr<RTCSessionDescription> LibWebRTCMediaEndpoint::currentRemoteDescription() const |
| 143 | { |
| 144 | return m_backend ? fromSessionDescription(m_backend->current_remote_description()) : nullptr; |
| 145 | } |
| 146 | |
| 147 | RefPtr<RTCSessionDescription> LibWebRTCMediaEndpoint::pendingLocalDescription() const |
| 148 | { |
| 149 | return m_backend ? fromSessionDescription(m_backend->pending_local_description()) : nullptr; |
| 150 | } |
| 151 | |
| 152 | RefPtr<RTCSessionDescription> LibWebRTCMediaEndpoint::pendingRemoteDescription() const |
| 153 | { |
| 154 | return m_backend ? fromSessionDescription(m_backend->pending_remote_description()) : nullptr; |
| 155 | } |
| 156 | |
| 157 | RefPtr<RTCSessionDescription> LibWebRTCMediaEndpoint::localDescription() const |
| 158 | { |
| 159 | return m_backend ? fromSessionDescription(m_backend->local_description()) : nullptr; |
| 160 | } |
| 161 | |
| 162 | RefPtr<RTCSessionDescription> LibWebRTCMediaEndpoint::remoteDescription() const |
| 163 | { |
| 164 | return m_backend ? fromSessionDescription(m_backend->remote_description()) : nullptr; |
| 165 | } |
| 166 | |
// Parses |description| and hands it to the libwebrtc backend. Failures are
// reported to the peer connection backend; success is delivered asynchronously
// through m_setLocalSessionDescriptionObserver.
void LibWebRTCMediaEndpoint::doSetLocalDescription(RTCSessionDescription& description)
{
    ASSERT(m_backend);

    webrtc::SdpParseError error;
    std::unique_ptr<webrtc::SessionDescriptionInterface> sessionDescription(webrtc::CreateSessionDescription(sessionDescriptionType(description.type()), description.sdp().utf8().data(), &error));

    if (!sessionDescription) {
        m_peerConnectionBackend.setLocalDescriptionFailed(Exception { OperationError, fromStdString(error.description) });
        return;
    }

    // FIXME: See https://bugs.webkit.org/show_bug.cgi?id=173783. Remove this test once fixed at LibWebRTC level.
    if (description.type() == RTCSdpType::Answer && !m_backend->pending_remote_description()) {
        m_peerConnectionBackend.setLocalDescriptionFailed(Exception { InvalidStateError, "Failed to set local answer sdp: no pending remote description."_s });
        return;
    }

    // SetLocalDescription takes ownership of the raw pointer, hence release().
    m_backend->SetLocalDescription(&m_setLocalSessionDescriptionObserver, sessionDescription.release());
}
| 187 | |
// Parses |description| and forwards it to the libwebrtc backend. A parse
// failure rejects with SyntaxError; otherwise the result is delivered through
// m_setRemoteSessionDescriptionObserver. Also starts the stats logging timer.
void LibWebRTCMediaEndpoint::doSetRemoteDescription(RTCSessionDescription& description)
{
    ASSERT(m_backend);

    webrtc::SdpParseError error;
    std::unique_ptr<webrtc::SessionDescriptionInterface> sessionDescription(webrtc::CreateSessionDescription(sessionDescriptionType(description.type()), description.sdp().utf8().data(), &error));
    if (!sessionDescription) {
        m_peerConnectionBackend.setRemoteDescriptionFailed(Exception { SyntaxError, fromStdString(error.description) });
        return;
    }
    // SetRemoteDescription takes ownership of the raw pointer, hence release().
    m_backend->SetRemoteDescription(&m_setRemoteSessionDescriptionObserver, sessionDescription.release());

    startLoggingStats();
}
| 202 | |
// Attaches |track| to |sender| and registers it with the libwebrtc backend.
// Returns false if the libwebrtc track could not be created or added.
bool LibWebRTCMediaEndpoint::addTrack(LibWebRTCRtpSenderBackend& sender, MediaStreamTrack& track, const Vector<String>& mediaStreamIds)
{
    ASSERT(m_backend);

    // In Plan B mode, make sure a libwebrtc local stream exists for the first
    // given stream id (or a freshly generated one when no id was provided).
    if (!RuntimeEnabledFeatures::sharedFeatures().webRTCUnifiedPlanEnabled()) {
        String mediaStreamId = mediaStreamIds.isEmpty() ? createCanonicalUUIDString() : mediaStreamIds[0];
        m_localStreams.ensure(mediaStreamId, [&] {
            auto mediaStream = m_peerConnectionFactory.CreateLocalMediaStream(mediaStreamId.utf8().data());
            m_backend->AddStream(mediaStream);
            return mediaStream;
        });
    }

    // Wrap the WebCore track into an outgoing source plus a libwebrtc track of
    // the matching media type.
    LibWebRTCRtpSenderBackend::Source source;
    rtc::scoped_refptr<webrtc::MediaStreamTrackInterface> rtcTrack;
    switch (track.privateTrack().type()) {
    case RealtimeMediaSource::Type::Audio: {
        auto audioSource = RealtimeOutgoingAudioSource::create(track.privateTrack());
        rtcTrack = m_peerConnectionFactory.CreateAudioTrack(track.id().utf8().data(), audioSource.ptr());
        source = WTFMove(audioSource);
        break;
    }
    case RealtimeMediaSource::Type::Video: {
        auto videoSource = RealtimeOutgoingVideoSource::create(track.privateTrack());
        rtcTrack = m_peerConnectionFactory.CreateVideoTrack(track.id().utf8().data(), videoSource.ptr());
        source = WTFMove(videoSource);
        break;
    }
    case RealtimeMediaSource::Type::None:
        ASSERT_NOT_REACHED();
        return false;
    }

    sender.setSource(WTFMove(source));
    // If the sender already has a libwebrtc RTP sender, just swap the track in.
    if (auto rtpSender = sender.rtcSender()) {
        rtpSender->SetTrack(rtcTrack.get());
        return true;
    }

    // Otherwise create a new libwebrtc RTP sender bound to the given stream ids.
    std::vector<std::string> ids;
    for (auto& id : mediaStreamIds)
        ids.push_back(id.utf8().data());

    auto newRTPSender = m_backend->AddTrack(rtcTrack.get(), WTFMove(ids));
    if (!newRTPSender.ok())
        return false;
    sender.setRTCSender(newRTPSender.MoveValue());
    return true;
}
| 252 | |
// Detaches the sender's track from the libwebrtc backend and clears its outgoing source.
void LibWebRTCMediaEndpoint::removeTrack(LibWebRTCRtpSenderBackend& sender)
{
    ASSERT(m_backend);
    m_backend->RemoveTrack(sender.rtcSender());
    sender.clearSource();
}
| 259 | |
// Initiates offer creation. The SDP is delivered asynchronously through
// m_createSessionDescriptionObserver.
void LibWebRTCMediaEndpoint::doCreateOffer(const RTCOfferOptions& options)
{
    ASSERT(m_backend);

    m_isInitiator = true;
    webrtc::PeerConnectionInterface::RTCOfferAnswerOptions rtcOptions;
    rtcOptions.ice_restart = options.iceRestart;
    rtcOptions.voice_activity_detection = options.voiceActivityDetection;

    // offer_to_receive_* flags are only set in the Plan B path; Unified Plan
    // expresses directionality through transceivers instead.
    if (!RuntimeEnabledFeatures::sharedFeatures().webRTCUnifiedPlanEnabled()) {
        if (m_peerConnectionBackend.shouldOfferAllowToReceive("audio"_s))
            rtcOptions.offer_to_receive_audio = webrtc::PeerConnectionInterface::RTCOfferAnswerOptions::kOfferToReceiveMediaTrue;
        if (m_peerConnectionBackend.shouldOfferAllowToReceive("video"_s))
            rtcOptions.offer_to_receive_video = webrtc::PeerConnectionInterface::RTCOfferAnswerOptions::kOfferToReceiveMediaTrue;
    }
    m_backend->CreateOffer(&m_createSessionDescriptionObserver, rtcOptions);
}
| 277 | |
| 278 | void LibWebRTCMediaEndpoint::doCreateAnswer() |
| 279 | { |
| 280 | ASSERT(m_backend); |
| 281 | |
| 282 | m_isInitiator = false; |
| 283 | m_backend->CreateAnswer(&m_createSessionDescriptionObserver, { }); |
| 284 | } |
| 285 | |
// Creates a stats collector whose callback runs on the main thread and
// resolves |promise| with a freshly created RTCStatsReport. Returns null from
// the callback when the endpoint has been stopped or the report has no
// backing map to fill.
rtc::scoped_refptr<LibWebRTCStatsCollector> LibWebRTCMediaEndpoint::createStatsCollector(Ref<DeferredPromise>&& promise)
{
    return LibWebRTCStatsCollector::create([promise = WTFMove(promise), protectedThis = makeRef(*this)]() mutable -> RefPtr<RTCStatsReport> {
        ASSERT(isMainThread());
        if (protectedThis->isStopped())
            return nullptr;

        auto report = RTCStatsReport::create();

        promise->resolve<IDLInterface<RTCStatsReport>>(report.copyRef());

        // The promise resolution might fail in which case no backing map will be created.
        if (!report->backingMap())
            return nullptr;
        return report;
    });
}
| 303 | |
| 304 | void LibWebRTCMediaEndpoint::getStats(Ref<DeferredPromise>&& promise) |
| 305 | { |
| 306 | if (m_backend) |
| 307 | m_backend->GetStats(createStatsCollector(WTFMove(promise))); |
| 308 | } |
| 309 | |
| 310 | void LibWebRTCMediaEndpoint::getStats(webrtc::RtpReceiverInterface& receiver, Ref<DeferredPromise>&& promise) |
| 311 | { |
| 312 | if (m_backend) |
| 313 | m_backend->GetStats(rtc::scoped_refptr<webrtc::RtpReceiverInterface>(&receiver), createStatsCollector(WTFMove(promise))); |
| 314 | } |
| 315 | |
| 316 | void LibWebRTCMediaEndpoint::getStats(webrtc::RtpSenderInterface& sender, Ref<DeferredPromise>&& promise) |
| 317 | { |
| 318 | if (m_backend) |
| 319 | m_backend->GetStats(rtc::scoped_refptr<webrtc::RtpSenderInterface>(&sender), createStatsCollector(WTFMove(promise))); |
| 320 | } |
| 321 | |
| 322 | static RTCSignalingState signalingState(webrtc::PeerConnectionInterface::SignalingState state) |
| 323 | { |
| 324 | switch (state) { |
| 325 | case webrtc::PeerConnectionInterface::kStable: |
| 326 | return RTCSignalingState::Stable; |
| 327 | case webrtc::PeerConnectionInterface::kHaveLocalOffer: |
| 328 | return RTCSignalingState::HaveLocalOffer; |
| 329 | case webrtc::PeerConnectionInterface::kHaveLocalPrAnswer: |
| 330 | return RTCSignalingState::HaveLocalPranswer; |
| 331 | case webrtc::PeerConnectionInterface::kHaveRemoteOffer: |
| 332 | return RTCSignalingState::HaveRemoteOffer; |
| 333 | case webrtc::PeerConnectionInterface::kHaveRemotePrAnswer: |
| 334 | return RTCSignalingState::HaveRemotePranswer; |
| 335 | case webrtc::PeerConnectionInterface::kClosed: |
| 336 | return RTCSignalingState::Stable; |
| 337 | } |
| 338 | |
| 339 | ASSERT_NOT_REACHED(); |
| 340 | return RTCSignalingState::Stable; |
| 341 | } |
| 342 | |
| 343 | void LibWebRTCMediaEndpoint::OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState rtcState) |
| 344 | { |
| 345 | auto state = signalingState(rtcState); |
| 346 | callOnMainThread([protectedThis = makeRef(*this), state] { |
| 347 | if (protectedThis->isStopped()) |
| 348 | return; |
| 349 | protectedThis->m_peerConnectionBackend.updateSignalingState(state); |
| 350 | }); |
| 351 | } |
| 352 | |
| 353 | MediaStream& LibWebRTCMediaEndpoint::mediaStreamFromRTCStream(webrtc::MediaStreamInterface& rtcStream) |
| 354 | { |
| 355 | auto label = fromStdString(rtcStream.id()); |
| 356 | auto mediaStream = m_remoteStreamsById.ensure(label, [label, this]() mutable { |
| 357 | return MediaStream::create(*m_peerConnectionBackend.connection().scriptExecutionContext(), MediaStreamPrivate::create({ }, WTFMove(label))); |
| 358 | }); |
| 359 | return *mediaStream.iterator->value; |
| 360 | } |
| 361 | |
// Intentionally a no-op: remote MediaStream objects are created on demand by
// mediaStreamFromRTCStream() when a track event references them.
void LibWebRTCMediaEndpoint::addRemoteStream(webrtc::MediaStreamInterface&)
{
}
| 365 | |
| 366 | void LibWebRTCMediaEndpoint::addRemoteTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface>&& rtcReceiver, const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>>& rtcStreams) |
| 367 | { |
| 368 | ASSERT(rtcReceiver); |
| 369 | RefPtr<RTCRtpReceiver> receiver; |
| 370 | RefPtr<RealtimeMediaSource> remoteSource; |
| 371 | |
| 372 | auto* rtcTrack = rtcReceiver->track().get(); |
| 373 | |
| 374 | switch (rtcReceiver->media_type()) { |
| 375 | case cricket::MEDIA_TYPE_DATA: |
| 376 | return; |
| 377 | case cricket::MEDIA_TYPE_AUDIO: { |
| 378 | rtc::scoped_refptr<webrtc::AudioTrackInterface> audioTrack = static_cast<webrtc::AudioTrackInterface*>(rtcTrack); |
| 379 | auto audioReceiver = m_peerConnectionBackend.audioReceiver(fromStdString(rtcTrack->id())); |
| 380 | |
| 381 | receiver = WTFMove(audioReceiver.receiver); |
| 382 | audioReceiver.source->setSourceTrack(WTFMove(audioTrack)); |
| 383 | break; |
| 384 | } |
| 385 | case cricket::MEDIA_TYPE_VIDEO: { |
| 386 | rtc::scoped_refptr<webrtc::VideoTrackInterface> videoTrack = static_cast<webrtc::VideoTrackInterface*>(rtcTrack); |
| 387 | auto videoReceiver = m_peerConnectionBackend.videoReceiver(fromStdString(rtcTrack->id())); |
| 388 | |
| 389 | receiver = WTFMove(videoReceiver.receiver); |
| 390 | videoReceiver.source->setSourceTrack(WTFMove(videoTrack)); |
| 391 | break; |
| 392 | } |
| 393 | } |
| 394 | |
| 395 | receiver->setBackend(std::make_unique<LibWebRTCRtpReceiverBackend>(WTFMove(rtcReceiver))); |
| 396 | auto& track = receiver->track(); |
| 397 | addPendingTrackEvent(receiver.releaseNonNull(), track, rtcStreams, nullptr); |
| 398 | } |
| 399 | |
// Adds |track| to each of the remote MediaStreams it belongs to, records the
// stream ids for later removal (see removeRemoteTrack()), and queues a track
// event on the peer connection backend.
void LibWebRTCMediaEndpoint::addPendingTrackEvent(Ref<RTCRtpReceiver>&& receiver, MediaStreamTrack& track, const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>>& rtcStreams, RefPtr<RTCRtpTransceiver>&& transceiver)
{
    Vector<RefPtr<MediaStream>> streams;
    for (auto& rtcStream : rtcStreams) {
        auto& mediaStream = mediaStreamFromRTCStream(*rtcStream.get());
        streams.append(&mediaStream);
        mediaStream.addTrackFromPlatform(track);
    }
    auto streamIds = WTF::map(streams, [](auto& stream) -> String {
        return stream->id();
    });
    m_remoteStreamsFromRemoteTrack.add(&track, WTFMove(streamIds));

    m_peerConnectionBackend.addPendingTrackEvent({ WTFMove(receiver), makeRef(track), WTFMove(streams), WTFMove(transceiver) });
}
| 415 | |
// Rebinds an already-created receiver source to the libwebrtc track currently
// carried by |rtcReceiver|. The source's media type must match the receiver's.
static inline void setExistingReceiverSourceTrack(RealtimeMediaSource& existingSource, webrtc::RtpReceiverInterface& rtcReceiver)
{
    switch (rtcReceiver.media_type()) {
    case cricket::MEDIA_TYPE_AUDIO: {
        ASSERT(existingSource.type() == RealtimeMediaSource::Type::Audio);
        rtc::scoped_refptr<webrtc::AudioTrackInterface> audioTrack = static_cast<webrtc::AudioTrackInterface*>(rtcReceiver.track().get());
        downcast<RealtimeIncomingAudioSource>(existingSource).setSourceTrack(WTFMove(audioTrack));
        return;
    }
    case cricket::MEDIA_TYPE_VIDEO: {
        ASSERT(existingSource.type() == RealtimeMediaSource::Type::Video);
        rtc::scoped_refptr<webrtc::VideoTrackInterface> videoTrack = static_cast<webrtc::VideoTrackInterface*>(rtcReceiver.track().get());
        downcast<RealtimeIncomingVideoSource>(existingSource).setSourceTrack(WTFMove(videoTrack));
        return;
    }
    case cricket::MEDIA_TYPE_DATA:
        // Callers only pass audio/video receivers.
        ASSERT_NOT_REACHED();
        return;
    }
}
| 436 | |
| 437 | RefPtr<RealtimeMediaSource> LibWebRTCMediaEndpoint::sourceFromNewReceiver(webrtc::RtpReceiverInterface& rtcReceiver) |
| 438 | { |
| 439 | auto rtcTrack = rtcReceiver.track(); |
| 440 | switch (rtcReceiver.media_type()) { |
| 441 | case cricket::MEDIA_TYPE_DATA: |
| 442 | return nullptr; |
| 443 | case cricket::MEDIA_TYPE_AUDIO: { |
| 444 | rtc::scoped_refptr<webrtc::AudioTrackInterface> audioTrack = static_cast<webrtc::AudioTrackInterface*>(rtcTrack.get()); |
| 445 | return RealtimeIncomingAudioSource::create(WTFMove(audioTrack), fromStdString(rtcTrack->id())); |
| 446 | } |
| 447 | case cricket::MEDIA_TYPE_VIDEO: { |
| 448 | rtc::scoped_refptr<webrtc::VideoTrackInterface> videoTrack = static_cast<webrtc::VideoTrackInterface*>(rtcTrack.get()); |
| 449 | return RealtimeIncomingVideoSource::create(WTFMove(videoTrack), fromStdString(rtcTrack->id())); |
| 450 | } |
| 451 | } |
| 452 | |
| 453 | RELEASE_ASSERT_NOT_REACHED(); |
| 454 | } |
| 455 | |
| 456 | void LibWebRTCMediaEndpoint::collectTransceivers() |
| 457 | { |
| 458 | if (!m_backend) |
| 459 | return; |
| 460 | |
| 461 | if (!RuntimeEnabledFeatures::sharedFeatures().webRTCUnifiedPlanEnabled()) |
| 462 | return; |
| 463 | |
| 464 | for (auto& rtcTransceiver : m_backend->GetTransceivers()) { |
| 465 | auto* existingTransceiver = m_peerConnectionBackend.existingTransceiver([&](auto& transceiverBackend) { |
| 466 | return rtcTransceiver.get() == transceiverBackend.rtcTransceiver(); |
| 467 | }); |
| 468 | if (existingTransceiver) |
| 469 | continue; |
| 470 | |
| 471 | auto rtcReceiver = rtcTransceiver->receiver(); |
| 472 | auto source = sourceFromNewReceiver(*rtcReceiver); |
| 473 | if (!source) |
| 474 | return; |
| 475 | |
| 476 | m_peerConnectionBackend.newRemoteTransceiver(std::make_unique<LibWebRTCRtpTransceiverBackend>(WTFMove(rtcTransceiver)), source.releaseNonNull()); |
| 477 | } |
| 478 | } |
| 479 | |
// Unified Plan handling of a track arriving on a transceiver: reuse the
// matching WebCore transceiver when one exists (rebinding its source track),
// otherwise create a new remote transceiver; in both cases queue a track event.
void LibWebRTCMediaEndpoint::newTransceiver(rtc::scoped_refptr<webrtc::RtpTransceiverInterface>&& rtcTransceiver)
{
    auto* transceiver = m_peerConnectionBackend.existingTransceiver([&](auto& transceiverBackend) {
        return rtcTransceiver.get() == transceiverBackend.rtcTransceiver();
    });
    if (transceiver) {
        auto rtcReceiver = rtcTransceiver->receiver();
        setExistingReceiverSourceTrack(transceiver->receiver().track().source(), *rtcReceiver);
        addPendingTrackEvent(makeRef(transceiver->receiver()), transceiver->receiver().track(), rtcReceiver->streams(), makeRef(*transceiver));
        return;
    }

    auto rtcReceiver = rtcTransceiver->receiver();
    auto source = sourceFromNewReceiver(*rtcReceiver);
    // A null source means a data receiver, which produces no track event.
    if (!source)
        return;

    auto& newTransceiver = m_peerConnectionBackend.newRemoteTransceiver(std::make_unique<LibWebRTCRtpTransceiverBackend>(WTFMove(rtcTransceiver)), source.releaseNonNull());

    addPendingTrackEvent(makeRef(newTransceiver.receiver()), newTransceiver.receiver().track(), rtcReceiver->streams(), makeRef(newTransceiver));
}
| 501 | |
// Unified Plan handling of OnRemoveTrack: detaches the track from every remote
// MediaStream it was added to and mutes its source.
void LibWebRTCMediaEndpoint::removeRemoteTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface>&& receiver)
{
    // FIXME: Support plan B code path.
    if (!RuntimeEnabledFeatures::sharedFeatures().webRTCUnifiedPlanEnabled())
        return;

    auto* transceiver = m_peerConnectionBackend.existingTransceiver([&receiver](auto& transceiverBackend) {
        auto* rtcTransceiver = transceiverBackend.rtcTransceiver();
        return rtcTransceiver && receiver.get() == rtcTransceiver->receiver().get();
    });
    if (!transceiver)
        return;

    auto& track = transceiver->receiver().track();

    // The stream ids were recorded in addPendingTrackEvent().
    for (auto& id : m_remoteStreamsFromRemoteTrack.get(&track)) {
        if (auto stream = m_remoteStreamsById.get(id))
            stream->privateStream().removeTrack(track.privateTrack(), MediaStreamPrivate::NotifyClientOption::Notify);
    }

    // The removed remote track is muted rather than ended.
    track.source().setMuted(true);
}
| 524 | |
// Shared helper for both addTransceiver() overloads: asks libwebrtc to create
// a transceiver for |trackOrKind| (a media type or a libwebrtc track) and
// wraps the result into sender/receiver/transceiver backends.
// Returns nullopt when libwebrtc rejects the request.
template<typename T>
Optional<LibWebRTCMediaEndpoint::Backends> LibWebRTCMediaEndpoint::createTransceiverBackends(T&& trackOrKind, const RTCRtpTransceiverInit& init, LibWebRTCRtpSenderBackend::Source&& source)
{
    auto result = m_backend->AddTransceiver(WTFMove(trackOrKind), fromRtpTransceiverInit(init));
    if (!result.ok())
        return WTF::nullopt;

    auto transceiver = std::make_unique<LibWebRTCRtpTransceiverBackend>(result.MoveValue());
    return LibWebRTCMediaEndpoint::Backends { transceiver->createSenderBackend(m_peerConnectionBackend, WTFMove(source)), transceiver->createReceiverBackend(), WTFMove(transceiver) };
}
| 535 | |
| 536 | Optional<LibWebRTCMediaEndpoint::Backends> LibWebRTCMediaEndpoint::addTransceiver(const String& trackKind, const RTCRtpTransceiverInit& init) |
| 537 | { |
| 538 | auto type = trackKind == "audio" ? cricket::MediaType::MEDIA_TYPE_AUDIO : cricket::MediaType::MEDIA_TYPE_VIDEO; |
| 539 | return createTransceiverBackends(type, init, nullptr); |
| 540 | } |
| 541 | |
// Creates the outgoing source and the matching libwebrtc track for |track|.
// This mirrors the type switch in addTrack().
std::pair<LibWebRTCRtpSenderBackend::Source, rtc::scoped_refptr<webrtc::MediaStreamTrackInterface>> LibWebRTCMediaEndpoint::createSourceAndRTCTrack(MediaStreamTrack& track)
{
    LibWebRTCRtpSenderBackend::Source source;
    rtc::scoped_refptr<webrtc::MediaStreamTrackInterface> rtcTrack;
    switch (track.privateTrack().type()) {
    case RealtimeMediaSource::Type::None:
        ASSERT_NOT_REACHED();
        break;
    case RealtimeMediaSource::Type::Audio: {
        auto audioSource = RealtimeOutgoingAudioSource::create(track.privateTrack());
        rtcTrack = m_peerConnectionFactory.CreateAudioTrack(track.id().utf8().data(), audioSource.ptr());
        source = WTFMove(audioSource);
        break;
    }
    case RealtimeMediaSource::Type::Video: {
        auto videoSource = RealtimeOutgoingVideoSource::create(track.privateTrack());
        rtcTrack = m_peerConnectionFactory.CreateVideoTrack(track.id().utf8().data(), videoSource.ptr());
        source = WTFMove(videoSource);
        break;
    }
    }
    return std::make_pair(WTFMove(source), WTFMove(rtcTrack));
}
| 565 | |
// Adds a transceiver sending |track|.
Optional<LibWebRTCMediaEndpoint::Backends> LibWebRTCMediaEndpoint::addTransceiver(MediaStreamTrack& track, const RTCRtpTransceiverInit& init)
{
    auto sourceAndTrack = createSourceAndRTCTrack(track);
    return createTransceiverBackends(WTFMove(sourceAndTrack.second), init, WTFMove(sourceAndTrack.first));
}
| 571 | |
// Replaces the sender's outgoing source and libwebrtc track with ones wrapping |track|.
void LibWebRTCMediaEndpoint::setSenderSourceFromTrack(LibWebRTCRtpSenderBackend& sender, MediaStreamTrack& track)
{
    auto sourceAndTrack = createSourceAndRTCTrack(track);
    sender.setSource(WTFMove(sourceAndTrack.first));
    sender.rtcSender()->SetTrack(WTFMove(sourceAndTrack.second));
}
| 578 | |
| 579 | std::unique_ptr<LibWebRTCRtpTransceiverBackend> LibWebRTCMediaEndpoint::transceiverBackendFromSender(LibWebRTCRtpSenderBackend& backend) |
| 580 | { |
| 581 | for (auto& transceiver : m_backend->GetTransceivers()) { |
| 582 | if (transceiver->sender().get() == backend.rtcSender()) |
| 583 | return std::make_unique<LibWebRTCRtpTransceiverBackend>(rtc::scoped_refptr<webrtc::RtpTransceiverInterface>(transceiver)); |
| 584 | } |
| 585 | return nullptr; |
| 586 | } |
| 587 | |
| 588 | |
| 589 | void LibWebRTCMediaEndpoint::removeRemoteStream(webrtc::MediaStreamInterface& rtcStream) |
| 590 | { |
| 591 | bool removed = m_remoteStreamsById.remove(fromStdString(rtcStream.id())); |
| 592 | ASSERT_UNUSED(removed, removed); |
| 593 | } |
| 594 | |
| 595 | void LibWebRTCMediaEndpoint::OnAddStream(rtc::scoped_refptr<webrtc::MediaStreamInterface> stream) |
| 596 | { |
| 597 | callOnMainThread([protectedThis = makeRef(*this), stream = WTFMove(stream)] { |
| 598 | if (protectedThis->isStopped()) |
| 599 | return; |
| 600 | ASSERT(stream); |
| 601 | protectedThis->addRemoteStream(*stream.get()); |
| 602 | }); |
| 603 | } |
| 604 | |
| 605 | void LibWebRTCMediaEndpoint::OnRemoveStream(rtc::scoped_refptr<webrtc::MediaStreamInterface> stream) |
| 606 | { |
| 607 | callOnMainThread([protectedThis = makeRef(*this), stream = WTFMove(stream)] { |
| 608 | if (protectedThis->isStopped()) |
| 609 | return; |
| 610 | ASSERT(stream); |
| 611 | protectedThis->removeRemoteStream(*stream.get()); |
| 612 | }); |
| 613 | } |
| 614 | |
| 615 | void LibWebRTCMediaEndpoint::OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver, const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>>& streams) |
| 616 | { |
| 617 | if (RuntimeEnabledFeatures::sharedFeatures().webRTCUnifiedPlanEnabled()) |
| 618 | return; |
| 619 | |
| 620 | callOnMainThread([protectedThis = makeRef(*this), receiver = WTFMove(receiver), streams]() mutable { |
| 621 | if (protectedThis->isStopped()) |
| 622 | return; |
| 623 | protectedThis->addRemoteTrack(WTFMove(receiver), streams); |
| 624 | }); |
| 625 | } |
| 626 | |
| 627 | void LibWebRTCMediaEndpoint::OnTrack(rtc::scoped_refptr<webrtc::RtpTransceiverInterface> transceiver) |
| 628 | { |
| 629 | if (!RuntimeEnabledFeatures::sharedFeatures().webRTCUnifiedPlanEnabled()) |
| 630 | return; |
| 631 | |
| 632 | callOnMainThread([protectedThis = makeRef(*this), transceiver = WTFMove(transceiver)]() mutable { |
| 633 | if (protectedThis->isStopped()) |
| 634 | return; |
| 635 | protectedThis->newTransceiver(WTFMove(transceiver)); |
| 636 | }); |
| 637 | } |
| 638 | |
| 639 | void LibWebRTCMediaEndpoint::OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) |
| 640 | { |
| 641 | callOnMainThread([protectedThis = makeRef(*this), receiver = WTFMove(receiver)]() mutable { |
| 642 | if (protectedThis->isStopped()) |
| 643 | return; |
| 644 | protectedThis->removeRemoteTrack(WTFMove(receiver)); |
| 645 | }); |
| 646 | } |
| 647 | |
| 648 | std::unique_ptr<RTCDataChannelHandler> LibWebRTCMediaEndpoint::createDataChannel(const String& label, const RTCDataChannelInit& options) |
| 649 | { |
| 650 | auto init = LibWebRTCDataChannelHandler::fromRTCDataChannelInit(options); |
| 651 | auto channel = m_backend->CreateDataChannel(label.utf8().data(), &init); |
| 652 | return channel ? std::make_unique<LibWebRTCDataChannelHandler>(WTFMove(channel)) : nullptr; |
| 653 | } |
| 654 | |
// libwebrtc callback for a data channel opened by the remote peer; fires the
// datachannel event on the main thread.
void LibWebRTCMediaEndpoint::OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> dataChannel)
{
    callOnMainThread([protectedThis = makeRef(*this), dataChannel = WTFMove(dataChannel)]() mutable {
        if (protectedThis->isStopped())
            return;
        auto& connection = protectedThis->m_peerConnectionBackend.connection();
        connection.fireEvent(LibWebRTCDataChannelHandler::channelEvent(*connection.scriptExecutionContext(), WTFMove(dataChannel)));
    });
}
| 664 | |
// Shuts the endpoint down: stops stats logging, closes the libwebrtc backend
// and drops all remote stream bookkeeping. Safe to call more than once.
void LibWebRTCMediaEndpoint::stop()
{
    if (!m_backend)
        return;

    stopLoggingStats();

    // Close the connection before releasing our reference to it.
    m_backend->Close();
    m_backend = nullptr;
    m_remoteStreamsById.clear();
    m_remoteStreamsFromRemoteTrack.clear();
}
| 677 | |
| 678 | void LibWebRTCMediaEndpoint::OnRenegotiationNeeded() |
| 679 | { |
| 680 | callOnMainThread([protectedThis = makeRef(*this)] { |
| 681 | if (protectedThis->isStopped()) |
| 682 | return; |
| 683 | protectedThis->m_peerConnectionBackend.markAsNeedingNegotiation(); |
| 684 | }); |
| 685 | } |
| 686 | |
| 687 | static inline RTCIceConnectionState toRTCIceConnectionState(webrtc::PeerConnectionInterface::IceConnectionState state) |
| 688 | { |
| 689 | switch (state) { |
| 690 | case webrtc::PeerConnectionInterface::kIceConnectionNew: |
| 691 | return RTCIceConnectionState::New; |
| 692 | case webrtc::PeerConnectionInterface::kIceConnectionChecking: |
| 693 | return RTCIceConnectionState::Checking; |
| 694 | case webrtc::PeerConnectionInterface::kIceConnectionConnected: |
| 695 | return RTCIceConnectionState::Connected; |
| 696 | case webrtc::PeerConnectionInterface::kIceConnectionCompleted: |
| 697 | return RTCIceConnectionState::Completed; |
| 698 | case webrtc::PeerConnectionInterface::kIceConnectionFailed: |
| 699 | return RTCIceConnectionState::Failed; |
| 700 | case webrtc::PeerConnectionInterface::kIceConnectionDisconnected: |
| 701 | return RTCIceConnectionState::Disconnected; |
| 702 | case webrtc::PeerConnectionInterface::kIceConnectionClosed: |
| 703 | return RTCIceConnectionState::Closed; |
| 704 | case webrtc::PeerConnectionInterface::kIceConnectionMax: |
| 705 | break; |
| 706 | } |
| 707 | |
| 708 | ASSERT_NOT_REACHED(); |
| 709 | return RTCIceConnectionState::New; |
| 710 | } |
| 711 | |
| 712 | void LibWebRTCMediaEndpoint::OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState state) |
| 713 | { |
| 714 | auto connectionState = toRTCIceConnectionState(state); |
| 715 | callOnMainThread([protectedThis = makeRef(*this), connectionState] { |
| 716 | if (protectedThis->isStopped()) |
| 717 | return; |
| 718 | if (protectedThis->m_peerConnectionBackend.connection().iceConnectionState() != connectionState) |
| 719 | protectedThis->m_peerConnectionBackend.connection().updateIceConnectionState(connectionState); |
| 720 | }); |
| 721 | } |
| 722 | |
| 723 | void LibWebRTCMediaEndpoint::OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState state) |
| 724 | { |
| 725 | callOnMainThread([protectedThis = makeRef(*this), state] { |
| 726 | if (protectedThis->isStopped()) |
| 727 | return; |
| 728 | if (state == webrtc::PeerConnectionInterface::kIceGatheringComplete) |
| 729 | protectedThis->m_peerConnectionBackend.doneGatheringCandidates(); |
| 730 | else if (state == webrtc::PeerConnectionInterface::kIceGatheringGathering) |
| 731 | protectedThis->m_peerConnectionBackend.connection().updateIceGatheringState(RTCIceGatheringState::Gathering); |
| 732 | }); |
| 733 | } |
| 734 | |
void LibWebRTCMediaEndpoint::OnIceCandidate(const webrtc::IceCandidateInterface *rtcCandidate)
{
    ASSERT(rtcCandidate);

    // Serialize the candidate while still on the signaling thread.
    std::string sdp;
    rtcCandidate->ToString(&sdp);

    // NOTE(review): sdp_mline_index() returns an int and can be negative when
    // the index is unknown; safeCast would assert in that case. Confirm the
    // backend guarantees a non-negative index here.
    auto sdpMLineIndex = safeCast<unsigned short>(rtcCandidate->sdp_mline_index());

    callOnMainThread([protectedThis = makeRef(*this), mid = fromStdString(rtcCandidate->sdp_mid()), sdp = fromStdString(sdp), sdpMLineIndex, url = fromStdString(rtcCandidate->server_url())]() mutable {
        if (protectedThis->isStopped())
            return;
        // Hand the candidate to the WebCore backend on the main thread.
        protectedThis->m_peerConnectionBackend.newICECandidate(WTFMove(sdp), WTFMove(mid), sdpMLineIndex, WTFMove(url));
    });
}
| 750 | |
void LibWebRTCMediaEndpoint::OnIceCandidatesRemoved(const std::vector<cricket::Candidate>&)
{
    // This observer callback is not expected to fire in this configuration.
    ASSERT_NOT_REACHED();
}
| 755 | |
| 756 | void LibWebRTCMediaEndpoint::createSessionDescriptionSucceeded(std::unique_ptr<webrtc::SessionDescriptionInterface>&& description) |
| 757 | { |
| 758 | std::string sdp; |
| 759 | description->ToString(&sdp); |
| 760 | |
| 761 | callOnMainThread([protectedThis = makeRef(*this), sdp = fromStdString(sdp)]() mutable { |
| 762 | if (protectedThis->isStopped()) |
| 763 | return; |
| 764 | if (protectedThis->m_isInitiator) |
| 765 | protectedThis->m_peerConnectionBackend.createOfferSucceeded(WTFMove(sdp)); |
| 766 | else |
| 767 | protectedThis->m_peerConnectionBackend.createAnswerSucceeded(WTFMove(sdp)); |
| 768 | }); |
| 769 | } |
| 770 | |
| 771 | void LibWebRTCMediaEndpoint::createSessionDescriptionFailed(ExceptionCode errorCode, const char* errorMessage) |
| 772 | { |
| 773 | callOnMainThread([protectedThis = makeRef(*this), errorCode, errorMessage = String(errorMessage)] () mutable { |
| 774 | if (protectedThis->isStopped()) |
| 775 | return; |
| 776 | if (protectedThis->m_isInitiator) |
| 777 | protectedThis->m_peerConnectionBackend.createOfferFailed(Exception { errorCode, WTFMove(errorMessage) }); |
| 778 | else |
| 779 | protectedThis->m_peerConnectionBackend.createAnswerFailed(Exception { errorCode, WTFMove(errorMessage) }); |
| 780 | }); |
| 781 | } |
| 782 | |
| 783 | void LibWebRTCMediaEndpoint::setLocalSessionDescriptionSucceeded() |
| 784 | { |
| 785 | callOnMainThread([protectedThis = makeRef(*this)] { |
| 786 | if (protectedThis->isStopped()) |
| 787 | return; |
| 788 | protectedThis->m_peerConnectionBackend.setLocalDescriptionSucceeded(); |
| 789 | }); |
| 790 | } |
| 791 | |
| 792 | void LibWebRTCMediaEndpoint::setLocalSessionDescriptionFailed(ExceptionCode errorCode, const char* errorMessage) |
| 793 | { |
| 794 | callOnMainThread([protectedThis = makeRef(*this), errorCode, errorMessage = String(errorMessage)] () mutable { |
| 795 | if (protectedThis->isStopped()) |
| 796 | return; |
| 797 | protectedThis->m_peerConnectionBackend.setLocalDescriptionFailed(Exception { errorCode, WTFMove(errorMessage) }); |
| 798 | }); |
| 799 | } |
| 800 | |
| 801 | void LibWebRTCMediaEndpoint::setRemoteSessionDescriptionSucceeded() |
| 802 | { |
| 803 | callOnMainThread([protectedThis = makeRef(*this)] { |
| 804 | if (protectedThis->isStopped()) |
| 805 | return; |
| 806 | protectedThis->m_peerConnectionBackend.setRemoteDescriptionSucceeded(); |
| 807 | }); |
| 808 | } |
| 809 | |
| 810 | void LibWebRTCMediaEndpoint::setRemoteSessionDescriptionFailed(ExceptionCode errorCode, const char* errorMessage) |
| 811 | { |
| 812 | callOnMainThread([protectedThis = makeRef(*this), errorCode, errorMessage = String(errorMessage)] () mutable { |
| 813 | if (protectedThis->isStopped()) |
| 814 | return; |
| 815 | protectedThis->m_peerConnectionBackend.setRemoteDescriptionFailed(Exception { errorCode, WTFMove(errorMessage) }); |
| 816 | }); |
| 817 | } |
| 818 | |
void LibWebRTCMediaEndpoint::gatherStatsForLogging()
{
    // Fired by m_statsLogTimer; results arrive asynchronously via
    // OnStatsDelivered(). stop() halts the timer before clearing m_backend,
    // so m_backend is expected to be non-null here.
    m_backend->GetStats(this);
}
| 823 | |
| 824 | class RTCStatsLogger { |
| 825 | public: |
| 826 | explicit RTCStatsLogger(const webrtc::RTCStats& stats) |
| 827 | : m_stats(stats) |
| 828 | { |
| 829 | } |
| 830 | |
| 831 | String toJSONString() const { return String(m_stats.ToJson().c_str()); } |
| 832 | |
| 833 | private: |
| 834 | const webrtc::RTCStats& m_stats; |
| 835 | }; |
| 836 | |
void LibWebRTCMediaEndpoint::OnStatsDelivered(const rtc::scoped_refptr<const webrtc::RTCStatsReport>& report)
{
#if !RELEASE_LOG_DISABLED
    // Remember when the first report arrived so statsLogInterval() can back
    // off as the connection ages.
    int64_t timestamp = report->timestamp_us();
    if (!m_statsFirstDeliveredTimestamp)
        m_statsFirstDeliveredTimestamp = timestamp;

    callOnMainThread([protectedThis = makeRef(*this), this, timestamp, report] {
        // Re-arm the timer if the desired logging cadence has changed.
        if (m_backend && m_statsLogTimer.repeatInterval() != statsLogInterval(timestamp)) {
            m_statsLogTimer.stop();
            m_statsLogTimer.startRepeating(statsLogInterval(timestamp));
        }

        for (auto iterator = report->begin(); iterator != report->end(); ++iterator) {
            if (logger().willLog(logChannel(), WTFLogLevel::Debug)) {
                // Stats are very verbose, let's only display them in inspector console in verbose mode.
                logger().debug(LogWebRTC,
                    Logger::LogSiteIdentifier("LibWebRTCMediaEndpoint" , "OnStatsDelivered" , logIdentifier()),
                    RTCStatsLogger { *iterator });
            } else {
                logger().logAlways(LogWebRTCStats,
                    Logger::LogSiteIdentifier("LibWebRTCMediaEndpoint" , "OnStatsDelivered" , logIdentifier()),
                    RTCStatsLogger { *iterator });
            }
        }
    });
#else
    UNUSED_PARAM(report);
#endif
}
| 867 | |
void LibWebRTCMediaEndpoint::startLoggingStats()
{
#if !RELEASE_LOG_DISABLED
    // Restart from scratch so the initial logging interval applies again.
    if (m_statsLogTimer.isActive())
        m_statsLogTimer.stop();
    m_statsLogTimer.startRepeating(statsLogInterval(0));
#endif
}
| 876 | |
void LibWebRTCMediaEndpoint::stopLoggingStats()
{
    // Safe to call even when stats logging was never started.
    m_statsLogTimer.stop();
}
| 881 | |
| 882 | #if !RELEASE_LOG_DISABLED |
// Log channel used by the LoggerHelper machinery for this endpoint.
WTFLogChannel& LibWebRTCMediaEndpoint::logChannel() const
{
    return LogWebRTC;
}
| 887 | |
| 888 | Seconds LibWebRTCMediaEndpoint::statsLogInterval(int64_t reportTimestamp) const |
| 889 | { |
| 890 | if (logger().willLog(logChannel(), WTFLogLevel::Info)) |
| 891 | return 2_s; |
| 892 | |
| 893 | if (reportTimestamp - m_statsFirstDeliveredTimestamp > 15000000) |
| 894 | return 10_s; |
| 895 | |
| 896 | return 4_s; |
| 897 | } |
| 898 | #endif |
| 899 | |
| 900 | } // namespace WebCore |
| 901 | |
| 902 | namespace WTF { |
| 903 | |
| 904 | template<typename Type> |
| 905 | struct LogArgument; |
| 906 | |
| 907 | template <> |
| 908 | struct LogArgument<WebCore::RTCStatsLogger> { |
| 909 | static String toString(const WebCore::RTCStatsLogger& logger) |
| 910 | { |
| 911 | return String(logger.toJSONString()); |
| 912 | } |
| 913 | }; |
| 914 | |
| 915 | }; // namespace WTF |
| 916 | |
| 917 | |
| 918 | #endif // USE(LIBWEBRTC) |
| 919 | |