Creating the videoCapturer
First, Call.java runs private VideoCapturer createVideoCapturer(int index) to create the videoCapturer. The code is as follows:
private VideoCapturer createVideoCapturer(int index) {
VideoCapturer videoCapturer;
if (useCamera2()) {
if (!captureToTexture()) {
return null;
}
Logger.d("useCamera2");
videoCapturer = createCameraCapturer(new Camera2Enumerator(mContext), index);
} else {
Logger.d("useCamera1");
videoCapturer = createCameraCapturer(new Camera1Enumerator(captureToTexture()), index);
}
return videoCapturer;
}
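The useCamera2() and captureToTexture() helpers referenced above are not shown here. Below is a minimal sketch of what they typically look like in apprtc-style code; the two preference fields are assumptions rather than the project's actual code, while Camera2Enumerator.isSupported() is the standard org.webrtc check for camera2 availability.
private boolean useCamera2() {
  // Camera2 can only be used on devices whose camera2 HAL is reported as supported.
  // camera2Enabled is a hypothetical settings flag.
  return camera2Enabled && Camera2Enumerator.isSupported(mContext);
}

private boolean captureToTexture() {
  // Camera2 requires texture capture, which is why createVideoCapturer() returns null
  // when useCamera2() is true but captureToTexture() is false.
  // captureToTextureEnabled is a hypothetical settings flag.
  return captureToTextureEnabled;
}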
Taking Camera1 as the example, a Camera1Enumerator is used to create the videoCapturer. createCameraCapturer is executed, and inside it the enumerator creates the videoCapturer:
private VideoCapturer createCameraCapturer(CameraEnumerator enumerator, int index) {
...
VideoCapturer videoCapturer = enumerator.createCapturer(deviceName, new CameraVideoCapturer.CameraEventsHandler() {
...
In Camera1Enumerator.java the following code runs and ultimately returns a Camera1Capturer, so the actual runtime type of videoCapturer is Camera1Capturer.
public CameraVideoCapturer createCapturer(String deviceName, CameraEventsHandler eventsHandler) {
return new Camera1Capturer(deviceName, eventsHandler, this.captureToTexture);
}
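The body of createCameraCapturer is elided above. A hedged sketch of the usual device-selection loop (modeled on the apprtc PeerConnectionClient rather than this project's exact code, and omitting the index parameter and the CameraEventsHandler shown earlier) looks like this:
private VideoCapturer createCameraCapturer(CameraEnumerator enumerator) {
  final String[] deviceNames = enumerator.getDeviceNames();
  // Prefer a front-facing camera.
  for (String deviceName : deviceNames) {
    if (enumerator.isFrontFacing(deviceName)) {
      VideoCapturer capturer = enumerator.createCapturer(deviceName, null /* eventsHandler */);
      if (capturer != null) {
        return capturer;
      }
    }
  }
  // Otherwise fall back to any remaining camera.
  for (String deviceName : deviceNames) {
    if (!enumerator.isFrontFacing(deviceName)) {
      VideoCapturer capturer = enumerator.createCapturer(deviceName, null /* eventsHandler */);
      if (capturer != null) {
        return capturer;
      }
    }
  }
  return null;
}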
Creating the source and track and starting camera capture
First, PeerConnectionClient.java runs createPeerConnectionInternal, which calls mediaStream.addTrack(createVideoTrack(videoCapturer)); this is where the source and track are actually created.
private void createPeerConnectionInternal(EglBase.Context renderEGLContext) {
...
if (peerConnectionParameters.streamMode != StreamMode.RECV_ONLY) {
mediaStream = factory.createLocalMediaStream("ROBOT");
if (videoCallEnabled) {
mediaStream.addTrack(
createVideoTrack(videoCapturer)
);
setRenderVideoTrack();
if (bottomVideoCapturer != null) {
mediaStream.addTrack(
createBottomVideoTrack(bottomVideoCapturer)
);
setBottomRenderVideoTrack();
mediaStream.addTrack(bottomVideoTrack);
}
}
mediaStream.addTrack(createAudioTrack());
peerConnection.addStream(mediaStream);
if (videoCallEnabled) {
findVideoSender();
}
}
...
createVideoTrack is then executed:
private VideoTrack createVideoTrack(VideoCapturer capturer) {
videoSource = factory.createVideoSource(capturer);
capturer.startCapture(videoWidth, videoHeight, videoFps);
localVideoTrack = factory.createVideoTrack(FORWARD_VIDEO_TRACK_ID, videoSource);
localVideoTrack.setEnabled(renderVideo);
return localVideoTrack;
}
The PeerConnectionFactory class creates the videoSource:
public VideoSource createVideoSource(VideoCapturer capturer) {
final EglBase.Context eglContext =
localEglbase == null ? null : localEglbase.getEglBaseContext();
final SurfaceTextureHelper surfaceTextureHelper =
SurfaceTextureHelper.create(VIDEO_CAPTURER_THREAD_NAME, eglContext);
long nativeAndroidVideoTrackSource =
nativeCreateVideoSource(nativeFactory, surfaceTextureHelper, capturer.isScreencast());
VideoCapturer.CapturerObserver capturerObserver =
new AndroidVideoTrackSourceObserver(nativeAndroidVideoTrackSource);
capturer.initialize(
surfaceTextureHelper, ContextUtils.getApplicationContext(), capturerObserver);
return new VideoSource(nativeAndroidVideoTrackSource);
}
As shown, PeerConnectionFactory calls the native function nativeCreateVideoSource to create an AndroidVideoTrackSource and stores that object's pointer in a long. See nativeCreateVideoSource in video_jni.cc:
JNI_FUNCTION_DECLARATION(jlong,
PeerConnectionFactory_nativeCreateVideoSource,
JNIEnv* jni,
jclass,
jlong native_factory,
jobject j_surface_texture_helper,
jboolean is_screencast) {
OwnedFactoryAndThreads* factory =
reinterpret_cast<OwnedFactoryAndThreads*>(native_factory);
rtc::scoped_refptr<AndroidVideoTrackSource> source(
new rtc::RefCountedObject<AndroidVideoTrackSource>(
factory->signaling_thread(), jni, j_surface_texture_helper,
is_screencast));
rtc::scoped_refptr<VideoTrackSourceProxy> proxy_source =
VideoTrackSourceProxy::Create(factory->signaling_thread(),
factory->worker_thread(), source);
return (jlong)proxy_source.release();
}
Back in PeerConnectionFactory.createVideoSource, the following lines run next. An AndroidVideoTrackSourceObserver callback is created, and the AndroidVideoTrackSource (as a native pointer) is passed into its constructor so that frames received by AndroidVideoTrackSourceObserver can be forwarded to AndroidVideoTrackSource.
The AndroidVideoTrackSourceObserver is then registered with the CameraCapturer to receive frames.
Finally, capturer.initialize is called to perform the initialization.
VideoCapturer.CapturerObserver capturerObserver =
new AndroidVideoTrackSourceObserver(nativeAndroidVideoTrackSource);
capturer.initialize(
surfaceTextureHelper, ContextUtils.getApplicationContext(), capturerObserver);
return new VideoSource(nativeAndroidVideoTrackSource);
The initialize function of CameraCapturer.java:
@Override
public void initialize(SurfaceTextureHelper surfaceTextureHelper, Context applicationContext,
CapturerObserver capturerObserver) {
this.applicationContext = applicationContext;
this.capturerObserver = capturerObserver;
this.surfaceHelper = surfaceTextureHelper;
this.cameraThreadHandler =
surfaceTextureHelper == null ? null : surfaceTextureHelper.getHandler();
}
Back in PeerConnectionClient.java's createVideoTrack, the following line starts frame capture:
capturer.startCapture(videoWidth, videoHeight, videoFps);
Let's walk through the call chain.
The startCapture function of CameraCapturer.java:
public void startCapture(int width, int height, int framerate) {
...
createSessionInternal(0, null /* mediaRecorder */);
...
The createSessionInternal function of CameraCapturer.java:
private void createSessionInternal(int delayMs, final MediaRecorder mediaRecorder) {
uiThreadHandler.postDelayed(openCameraTimeoutRunnable, delayMs + OPEN_CAMERA_TIMEOUT);
cameraThreadHandler.postDelayed(new Runnable() {
@Override
public void run() {
createCameraSession(createSessionCallback, cameraSessionEventsHandler, applicationContext,
surfaceHelper, mediaRecorder, cameraName, width, height, framerate);
}
}, delayMs);
}
The createCameraSession function of Camera1Capturer.java:
@Override
protected void createCameraSession(CameraSession.CreateSessionCallback createSessionCallback,
CameraSession.Events events, Context applicationContext,
SurfaceTextureHelper surfaceTextureHelper, MediaRecorder mediaRecorder, String cameraName,
int width, int height, int framerate) {
Camera1Session.create(createSessionCallback, events,
captureToTexture || (mediaRecorder != null), applicationContext, surfaceTextureHelper,
mediaRecorder, Camera1Enumerator.getCameraIndex(cameraName), width, height, framerate);
}
The create function of Camera1Session.java:
This function does several things:
1. Opens the camera.
2. Configures camera parameters, including the frame rate (a sketch of this format-selection step follows the create() code below).
3. Sets the preview texture on the camera.
4. Creates the Camera1Session.
public static void create(final CreateSessionCallback callback, final Events events,
final boolean captureToTexture, final Context applicationContext,
final SurfaceTextureHelper surfaceTextureHelper, final MediaRecorder mediaRecorder,
final int cameraId, final int width, final int height, final int framerate) {
final long constructionTimeNs = System.nanoTime();
Logging.d(TAG, "Open camera " + cameraId);
events.onCameraOpening();
final android.hardware.Camera camera;
try {
camera = android.hardware.Camera.open(cameraId);
} catch (RuntimeException e) {
callback.onFailure(FailureType.ERROR, e.getMessage());
return;
}
try {
camera.setPreviewTexture(surfaceTextureHelper.getSurfaceTexture());
} catch (IOException e) {
camera.release();
callback.onFailure(FailureType.ERROR, e.getMessage());
return;
}
final android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
android.hardware.Camera.getCameraInfo(cameraId, info);
final android.hardware.Camera.Parameters parameters = camera.getParameters();
final CaptureFormat captureFormat =
findClosestCaptureFormat(parameters, width, height, framerate);
final Size pictureSize = findClosestPictureSize(parameters, width, height);
updateCameraParameters(camera, parameters, captureFormat, pictureSize, captureToTexture);
if (!captureToTexture) {
final int frameSize = captureFormat.frameSize();
for (int i = 0; i < NUMBER_OF_CAPTURE_BUFFERS; ++i) {
final ByteBuffer buffer = ByteBuffer.allocateDirect(frameSize);
camera.addCallbackBuffer(buffer.array());
}
}
// Calculate orientation manually and send it as CVO instead.
camera.setDisplayOrientation(0 /* degrees */);
callback.onDone(
new Camera1Session(events, captureToTexture, applicationContext, surfaceTextureHelper,
mediaRecorder, cameraId, camera, info, captureFormat, constructionTimeNs));
}
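findClosestCaptureFormat and updateCameraParameters are not reproduced here. As an approximation of step 2 above (not the verbatim WebRTC code), the selection logic boils down to picking the supported preview size and fps range closest to what startCapture requested, along these lines:
// Approximate sketch of the format-selection idea (not verbatim WebRTC code).
private static android.hardware.Camera.Size pickClosestSize(
    android.hardware.Camera.Parameters parameters, int width, int height) {
  android.hardware.Camera.Size best = null;
  int bestDiff = Integer.MAX_VALUE;
  for (android.hardware.Camera.Size size : parameters.getSupportedPreviewSizes()) {
    int diff = Math.abs(size.width - width) + Math.abs(size.height - height);
    if (diff < bestDiff) {
      bestDiff = diff;
      best = size;
    }
  }
  return best;
}

private static int[] pickClosestFpsRange(
    android.hardware.Camera.Parameters parameters, int framerate) {
  final int target = framerate * 1000; // Camera1 fps ranges are in units of 1/1000 fps.
  int[] best = null;
  int bestDiff = Integer.MAX_VALUE;
  for (int[] range : parameters.getSupportedPreviewFpsRange()) {
    int diff = Math.abs(range[0] - target) + Math.abs(range[1] - target);
    if (diff < bestDiff) {
      bestDiff = diff;
      best = range;
    }
  }
  return best;
}
The chosen size and fps range would then be applied with parameters.setPreviewSize(), parameters.setPreviewFpsRange() and camera.setParameters(parameters), which is roughly what updateCameraParameters does.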
The Camera1Session constructor in Camera1Session.java:
private Camera1Session(Events events, boolean captureToTexture, Context applicationContext,
SurfaceTextureHelper surfaceTextureHelper, MediaRecorder mediaRecorder, int cameraId,
android.hardware.Camera camera, android.hardware.Camera.CameraInfo info,
CaptureFormat captureFormat, long constructionTimeNs) {
...
startCapturing();
...
The startCapturing function of Camera1Session.java does two main things:
1. Calls camera.startPreview() to start camera capture.
2. Calls listenForBytebufferFrames() (or listenForTextureFrames() when capturing to texture) to listen for incoming frames.
private void startCapturing() {
...
if (captureToTexture) {
listenForTextureFrames();
} else {
listenForBytebufferFrames();
}
try {
camera.startPreview();
} catch (RuntimeException e) {
stopInternal();
events.onCameraError(this, e.getMessage());
}
...
The listenForBytebufferFrames function of Camera1Session.java forwards each captured frame through the events callback, converts the YUV format where necessary (the NV21 preview data is wrapped in an NV21Buffer), and attaches the capture timestamp.
private void listenForBytebufferFrames() {
camera.setPreviewCallbackWithBuffer(new android.hardware.Camera.PreviewCallback() {
@Override
public void onPreviewFrame(final byte[] data, android.hardware.Camera callbackCamera) {
checkIsOnCameraThread();
if (callbackCamera != camera) {
Logging.e(TAG, "Callback from a different camera. This should never happen.");
return;
}
if (state != SessionState.RUNNING) {
Logging.d(TAG, "Bytebuffer frame captured but camera is no longer running.");
return;
}
final long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());
if (!firstFrameReported) {
final int startTimeMs =
(int) TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - constructionTimeNs);
camera1StartTimeMsHistogram.addSample(startTimeMs);
firstFrameReported = true;
}
if (videoFrameEmitTrialEnabled) {
VideoFrame.Buffer frameBuffer = new NV21Buffer(data, captureFormat.width,
captureFormat.height, () -> cameraThreadHandler.post(() -> {
if (state == SessionState.RUNNING) {
camera.addCallbackBuffer(data);
}
}));
final VideoFrame frame =
new VideoFrame(frameBuffer, getFrameOrientation(), captureTimeNs);
events.onFrameCaptured(Camera1Session.this, frame);
frame.release();
} else {
events.onByteBufferFrameCaptured(Camera1Session.this, data, captureFormat.width,
captureFormat.height, getFrameOrientation(), captureTimeNs);
camera.addCallbackBuffer(data);
}
}
});
}
The onFrameCaptured function of CameraCapturer.java:
@Override
public void onFrameCaptured(CameraSession session, VideoFrame frame) {
checkIsOnCameraThread();
synchronized (stateLock) {
if (session != currentSession) {
Logging.w(TAG, "onTextureFrameCaptured from another session.");
return;
}
if (!firstFrameObserved) {
eventsHandler.onFirstFrameAvailable();
firstFrameObserved = true;
}
cameraStatistics.addFrame();
capturerObserver.onFrameCaptured(frame);
}
}
The onFrameCaptured function of AndroidVideoTrackSourceObserver.java:
@Override
public void onFrameCaptured(VideoFrame frame) {
nativeOnFrameCaptured(nativeSource, frame.getBuffer().getWidth(), frame.getBuffer().getHeight(),
frame.getRotation(), frame.getTimestampNs(), frame.getBuffer());
}
The corresponding JNI entry point in androidvideotracksource_jni.cc:
JNI_FUNCTION_DECLARATION(void,
AndroidVideoTrackSourceObserver_nativeOnFrameCaptured,
JNIEnv* jni,
jclass,
jlong j_source,
jint j_width,
jint j_height,
jint j_rotation,
jlong j_timestamp_ns,
jobject j_video_frame_buffer) {
AndroidVideoTrackSource* source =
AndroidVideoTrackSourceFromJavaProxy(j_source);
source->OnFrameCaptured(jni, j_width, j_height, j_timestamp_ns,
jintToVideoRotation(j_rotation),
j_video_frame_buffer);
}
The OnFrameCaptured function of AndroidVideoTrackSource does two things:
1. Crops/adapts the frame when needed.
2. Passes the data further along the pipeline.
void AndroidVideoTrackSource::OnFrameCaptured(JNIEnv* jni,
int width,
int height,
int64_t timestamp_ns,
VideoRotation rotation,
jobject j_video_frame_buffer) {
RTC_DCHECK(camera_thread_checker_.CalledOnValidThread());
int64_t camera_time_us = timestamp_ns / rtc::kNumNanosecsPerMicrosec;
int64_t translated_camera_time_us =
timestamp_aligner_.TranslateTimestamp(camera_time_us, rtc::TimeMicros());
int adapted_width;
int adapted_height;
int crop_width;
int crop_height;
int crop_x;
int crop_y;
//The frame may be cropped/adapted here.
if (!AdaptFrame(width, height, camera_time_us, &adapted_width,
&adapted_height, &crop_width, &crop_height, &crop_x,
&crop_y)) {
return;
}
rtc::scoped_refptr<VideoFrameBuffer> buffer =
AndroidVideoBuffer::Create(jni, j_video_frame_buffer)
->CropAndScale(jni, crop_x, crop_y, crop_width, crop_height,
adapted_width, adapted_height);
// AdaptedVideoTrackSource handles applying rotation for I420 frames.
if (apply_rotation() && rotation != kVideoRotation_0) {
buffer = buffer->ToI420();
}
OnFrame(VideoFrame(buffer, rotation, translated_camera_time_us));
}
The OnFrame function of AdaptedVideoTrackSource:
void AdaptedVideoTrackSource::OnFrame(const webrtc::VideoFrame& frame) {
rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
frame.video_frame_buffer());
/* Note that this is a "best effort" approach to
wants.rotation_applied; apply_rotation_ can change from false to
true between the check of apply_rotation() and the call to
broadcaster_.OnFrame(), in which case we generate a frame with
pending rotation despite some sink with wants.rotation_applied ==
true was just added. The VideoBroadcaster enforces
synchronization for us in this case, by not passing the frame on
to sinks which don't want it. */
if (apply_rotation() && frame.rotation() != webrtc::kVideoRotation_0 &&
buffer->type() == webrtc::VideoFrameBuffer::Type::kI420) {
/* Apply pending rotation. */
broadcaster_.OnFrame(webrtc::VideoFrame(
webrtc::I420Buffer::Rotate(*buffer->GetI420(), frame.rotation()),
webrtc::kVideoRotation_0, frame.timestamp_us()));
} else {
broadcaster_.OnFrame(frame);
}
}
The OnFrame function of VideoBroadcaster iterates over sink_pairs() and forwards the frame to every registered sink. The encoder is one of these sinks; this is where it receives captured frames. (A small application-side sink example follows the code below.)
void VideoBroadcaster::OnFrame(const webrtc::VideoFrame& frame) {
rtc::CritScope cs(&sinks_and_wants_lock_);
for (auto& sink_pair : sink_pairs()) {
if (sink_pair.wants.rotation_applied &&
frame.rotation() != webrtc::kVideoRotation_0) {
// Calls to OnFrame are not synchronized with changes to the sink wants.
// When rotation_applied is set to true, one or a few frames may get here
// with rotation still pending. Protect sinks that don't expect any
// pending rotation.
RTC_LOG(LS_VERBOSE) << "Discarding frame with unexpected rotation.";
continue;
}
if (sink_pair.wants.black_frames) {
sink_pair.sink->OnFrame(webrtc::VideoFrame(
GetBlackFrameBuffer(frame.width(), frame.height()), frame.rotation(),
frame.timestamp_us()));
} else {
sink_pair.sink->OnFrame(frame);
}
}
}
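Since the encoder is just one of the sinks registered with the broadcaster, anything else added as a sink on the local track receives the same frames through this exact fan-out. The sketch below is illustrative only and not part of the project above; it assumes a WebRTC revision whose Java API exposes VideoSink and VideoTrack.addSink (older builds use VideoRenderer/addRenderer instead).
// Hedged sketch: count frames delivered by VideoBroadcaster::OnFrame on the app side.
class FrameCounterSink implements org.webrtc.VideoSink {
  private int frameCount;

  @Override
  public void onFrame(org.webrtc.VideoFrame frame) {
    // Reached via the same VideoBroadcaster fan-out that feeds the encoder.
    frameCount++;
  }
}

// Usage (localVideoTrack is the track created in createVideoTrack above):
// localVideoTrack.addSink(new FrameCounterSink());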
Hooking encoder initialization up to the capture module
Encoder initialization starts with the creation of the PeerConnectionFactory.
The createPeerConnectionFactoryInternal function of PeerConnectionClient.java:
private void createPeerConnectionFactoryInternal(Context context) {
...
//Because hardware acceleration is used, a DefaultVideoEncoderFactory is created here to produce hardware encoders.
if (peerConnectionParameters.videoCodecHwAcceleration) {
encoderFactory = new DefaultVideoEncoderFactory(
rootEglBase.getEglBaseContext(), true /* enableIntelVp8Encoder */, enableH264HighProfile);
decoderFactory = null;//new DefaultVideoDecoderFactory(rootEglBase.getEglBaseContext());
} else {
encoderFactory = new SoftwareVideoEncoderFactory();
decoderFactory = new SoftwareVideoDecoderFactory();
}
factory = new PeerConnectionFactory(options, encoderFactory, decoderFactory);
...
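Before relying on hardware encoding it can be useful to check which codecs the factory actually offers on a given device. The helper below is a hedged sketch (not part of the project above); it assumes the org.webrtc VideoEncoderFactory interface exposes getSupportedCodecs(), which is the case in recent revisions.
// Hedged sketch: log the codecs the hardware-backed encoder factory can create.
static void logSupportedEncoders(org.webrtc.VideoEncoderFactory encoderFactory) {
  for (org.webrtc.VideoCodecInfo info : encoderFactory.getSupportedCodecs()) {
    // info.name is e.g. "VP8", "VP9" or "H264"; info.params carries profile details.
    android.util.Log.i("EncoderFactory", "Supported encoder: " + info.name + " " + info.params);
  }
}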
The PeerConnectionFactory constructor. It creates the native PeerConnectionFactory and stores its pointer in a long.
public PeerConnectionFactory(
Options options, VideoEncoderFactory encoderFactory, VideoDecoderFactory decoderFactory) {
checkInitializeHasBeenCalled();
nativeFactory = nativeCreatePeerConnectionFactory(options, encoderFactory, decoderFactory);
if (nativeFactory == 0) {
throw new RuntimeException("Failed to initialize PeerConnectionFactory!");
}
}
The native PeerConnectionFactory is actually created in peerconnectionfactory_jni.cc:
JNI_FUNCTION_DECLARATION(
jlong,
PeerConnectionFactory_nativeCreatePeerConnectionFactory,
JNIEnv* jni,
jclass,
jobject joptions,
jobject jencoder_factory,
jobject jdecoder_factory) {
return CreatePeerConnectionFactoryForJava(jni, joptions, jencoder_factory,
jdecoder_factory,
CreateAudioProcessing());
}
The CreatePeerConnectionFactoryForJava function in peerconnectionfactory_jni.cc:
jlong CreatePeerConnectionFactoryForJava(
JNIEnv* jni,
jobject joptions,
jobject jencoder_factory,
jobject jdecoder_factory,
rtc::scoped_refptr<AudioProcessing> audio_processor) {
...
// This uses the new API, does not automatically include software codecs.
std::unique_ptr<VideoEncoderFactory> video_encoder_factory = nullptr;
if (jencoder_factory == nullptr) {
legacy_video_encoder_factory = CreateLegacyVideoEncoderFactory();
video_encoder_factory = std::unique_ptr<VideoEncoderFactory>(
WrapLegacyVideoEncoderFactory(legacy_video_encoder_factory));
} else {
// The actual type of video_encoder_factory is VideoEncoderFactoryWrapper,
// a wrapper used to bridge between C++ and Java.
video_encoder_factory = std::unique_ptr<VideoEncoderFactory>(
CreateVideoEncoderFactory(jni, jencoder_factory));
}
//Because the jdecoder_factory passed in is nullptr, CreateLegacyVideoDecoderFactory
//is called here to create legacy_video_decoder_factory.
//Inside CreateLegacyVideoDecoderFactory a MediaCodecVideoDecoderFactory object is
//created, and the decoders it produces are of type MediaCodecVideoDecoder, i.e.
//hardware decoders. From this we can see that on Android WebRTC decodes in
//hardware (HW), and in this setup encoding is hardware as well.
std::unique_ptr<VideoDecoderFactory> video_decoder_factory = nullptr;
if (jdecoder_factory == nullptr) {
legacy_video_decoder_factory = CreateLegacyVideoDecoderFactory();
video_decoder_factory = std::unique_ptr<VideoDecoderFactory>(
WrapLegacyVideoDecoderFactory(legacy_video_decoder_factory));
} else {
video_decoder_factory = std::unique_ptr<VideoDecoderFactory>(
CreateVideoDecoderFactory(jni, jdecoder_factory));
}
//The type created by CreateMediaEngine is
//CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine>,
//declared in mediaengine.h.
//video_encoder_factory and video_decoder_factory are passed in and later used to create the encoders/decoders.
rtc::scoped_refptr<AudioDeviceModule> adm_scoped = nullptr;
media_engine.reset(CreateMediaEngine(
adm_scoped, audio_encoder_factory, audio_decoder_factory,
std::move(video_encoder_factory), std::move(video_decoder_factory),
audio_mixer, audio_processor));
...
At this point we have seen how video_encoder_factory and video_decoder_factory are created. Next we look at how the video encoder itself is created and how it is hooked up to the capture module.
In peerconnection.cc there is PeerConnection::SetLocalDescription, which is called after CreateOffer or CreateAnswer has produced the SDP:
void PeerConnection::SetLocalDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc) {
...
// Takes the ownership of |desc_temp|. On success, local_description() is
// updated to reflect the description that was passed in.
if (!SetCurrentOrPendingLocalDescription(std::move(desc_temp), &error)) {
PostSetSessionDescriptionFailure(observer, error);
return;
}
...
The SetCurrentOrPendingLocalDescription function of PeerConnection:
bool PeerConnection::SetCurrentOrPendingLocalDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
std::string* err_desc) {
...
// Transport and Media channels will be created only when offer is set.
if (action == kOffer && !CreateChannels(local_description()->description())) {
// TODO(mallinath) - Handle CreateChannel failure, as new local description
// is applied. Restore back to old description.
return BadLocalSdp(local_description()->type(), kCreateChannelFailed,
err_desc);
}
...
The CreateChannels function of PeerConnection:
bool PeerConnection::CreateChannels(const SessionDescription* desc) {
...
const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
if (video && !video->rejected && !video_channel()) {
if (!CreateVideoChannel(video,
GetBundleTransportName(video, bundle_group))) {
RTC_LOG(LS_ERROR) << "Failed to create video channel.";
return false;
}
}
...
The CreateVideoChannel function of PeerConnection:
// TODO(steveanton): Perhaps this should be managed by the RtpTransceiver.
bool PeerConnection::CreateVideoChannel(const cricket::ContentInfo* content,
const std::string* bundle_transport) {
...
cricket::VideoChannel* video_channel = channel_manager()->CreateVideoChannel(
call_.get(), configuration_.media_config, rtp_dtls_transport,
rtcp_dtls_transport, transport_controller_->signaling_thread(),
content->name, SrtpRequired(), video_options_);
//Store the video_channel.
GetVideoTransceiver()->internal()->SetChannel(video_channel);
...
The CreateVideoChannel function of ChannelManager:
VideoChannel* ChannelManager::CreateVideoChannel(
webrtc::Call* call,
const cricket::MediaConfig& media_config,
DtlsTransportInternal* rtp_transport,
DtlsTransportInternal* rtcp_transport,
rtc::Thread* signaling_thread,
const std::string& content_name,
bool srtp_required,
const VideoOptions& options) {
return worker_thread_->Invoke<VideoChannel*>(RTC_FROM_HERE, [&] {
return CreateVideoChannel_w(
call, media_config, rtp_transport, rtcp_transport, rtp_transport,
rtcp_transport, signaling_thread, content_name, srtp_required, options);
});
}
The CreateVideoChannel_w function of ChannelManager:
VideoChannel* ChannelManager::CreateVideoChannel_w(
webrtc::Call* call,
const cricket::MediaConfig& media_config,
DtlsTransportInternal* rtp_dtls_transport,
DtlsTransportInternal* rtcp_dtls_transport,
rtc::PacketTransportInternal* rtp_packet_transport,
rtc::PacketTransportInternal* rtcp_packet_transport,
rtc::Thread* signaling_thread,
const std::string& content_name,
bool srtp_required,
const VideoOptions& options) {
RTC_DCHECK_RUN_ON(worker_thread_);
RTC_DCHECK(initialized_);
RTC_DCHECK(call);
RTC_DCHECK(media_engine_);
//The actual type of media_engine_ is
//CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine>,
//declared in mediaengine.h.
VideoMediaChannel* media_channel = media_engine_->CreateVideoChannel(
call, media_config, options);
if (!media_channel) {
return nullptr;
}
//Create a VideoChannel (video_channel), which wraps the WebRtcVideoChannel (media_channel).
auto video_channel = rtc::MakeUnique<VideoChannel>(
worker_thread_, network_thread_, signaling_thread,
rtc::WrapUnique(media_channel), content_name,
rtcp_packet_transport == nullptr, srtp_required);
video_channel->Init_w(rtp_dtls_transport, rtcp_dtls_transport,
rtp_packet_transport, rtcp_packet_transport);
VideoChannel* video_channel_ptr = video_channel.get();
video_channels_.push_back(std::move(video_channel));
return video_channel_ptr;
}
The CreateVideoChannel function of CompositeMediaEngine (mediaengine.h):
virtual VideoMediaChannel* CreateVideoChannel(webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options) {
return video().CreateChannel(call, config, options);
}
The CreateChannel function of WebRtcVideoEngine:
WebRtcVideoChannel* WebRtcVideoEngine::CreateChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options) {
RTC_LOG(LS_INFO) << "CreateChannel. Options: " << options.ToString();
return new WebRtcVideoChannel(call, config, options, encoder_factory_.get(), decoder_factory_.get());
}
This chain of seemingly minor calls shows how WebRtcVideoChannel is created; WebRtcVideoChannel is the bridge through which the encoder is created and initialized.
Back in PeerConnection::SetCurrentOrPendingLocalDescription:
bool PeerConnection::SetCurrentOrPendingLocalDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
std::string* err_desc) {
...
if (!UpdateSessionState(action, cricket::CS_LOCAL, err_desc)) {
return false;
}
...
The UpdateSessionState function of PeerConnection:
bool PeerConnection::UpdateSessionState(Action action,
cricket::ContentSource source,
std::string* err_desc) {
...
if (!PushdownMediaDescription(cricket::CA_OFFER, source, err_desc)) {
SetError(ERROR_CONTENT, *err_desc);
}
...
The PushdownMediaDescription function of PeerConnection:
bool PeerConnection::PushdownMediaDescription(cricket::ContentAction action,
cricket::ContentSource source,
std::string* err) {
...
for (auto* channel : Channels()) {
// TODO(steveanton): Add support for multiple channels of the same type.
const ContentInfo* content_info =
cricket::GetFirstMediaContent(sdesc->contents(), channel->media_type());
if (!content_info) {
continue;
}
const MediaContentDescription* content_desc =
static_cast<const MediaContentDescription*>(content_info->description);
if (content_desc && !content_info->rejected) {
bool success = (source == cricket::CS_LOCAL)
? channel->SetLocalContent(content_desc, action, err)
: channel->SetRemoteContent(content_desc, action, err);
if (!success) {
all_success = false;
break;
}
}
}
...
The SetLocalContent function of BaseChannel:
bool BaseChannel::SetLocalContent(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "BaseChannel::SetLocalContent");
return InvokeOnWorker<bool>(
RTC_FROM_HERE,
Bind(&BaseChannel::SetLocalContent_w, this, content, action, error_desc));
}
The SetLocalContent_w function of VideoChannel:
bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
...
// TODO(pthatcher): Move local streams into VideoSendParameters, and
// only give it to the media channel once we have a remote
// description too (without a remote description, we won't be able
// to send them anyway).
if (!UpdateLocalStreams_w(video->streams(), action, error_desc)) {
SafeSetError("Failed to set local video description streams.", error_desc);
return false;
}
...
The UpdateLocalStreams_w function of BaseChannel:
bool BaseChannel::UpdateLocalStreams_w(const std::vector<StreamParams>& streams,
ContentAction action,
std::string* error_desc) {
...
// Check for new streams.
for (StreamParamsVec::const_iterator it = streams.begin();
it != streams.end(); ++it) {
if (!GetStreamBySsrc(local_streams_, it->first_ssrc())) {
if (media_channel()->AddSendStream(*it)) {
RTC_LOG(LS_INFO) << "Add send stream ssrc: " << it->ssrcs[0];
} else {
std::ostringstream desc;
desc << "Failed to add send stream ssrc: " << it->first_ssrc();
SafeSetError(desc.str(), error_desc);
ret = false;
}
}
}
...
The AddSendStream function of WebRtcVideoChannel:
bool WebRtcVideoChannel::AddSendStream(const StreamParams& sp) {
...
//Create a WebRtcVideoSendStream and store it in send_streams_.
WebRtcVideoSendStream* stream = new WebRtcVideoSendStream(
call_, sp, std::move(config), default_send_options_, encoder_factory_,
video_config_.enable_cpu_overuse_detection,
bitrate_config_.max_bitrate_bps, send_codec_, send_rtp_extensions_,
send_params_);
uint32_t ssrc = sp.first_ssrc();
RTC_DCHECK(ssrc != 0);
send_streams_[ssrc] = stream;
...
The WebRtcVideoSendStream constructor:
WebRtcVideoChannel::WebRtcVideoSendStream::WebRtcVideoSendStream(
...
if (codec_settings) {
bool force_encoder_allocation = false;
SetCodec(*codec_settings, force_encoder_allocation);
}
...
The SetCodec function of WebRtcVideoChannel::WebRtcVideoSendStream:
void WebRtcVideoChannel::WebRtcVideoSendStream::SetCodec(
const VideoCodecSettings& codec_settings,
bool force_encoder_allocation) {
...
// Do not re-create encoders of the same type. We can't overwrite
// |allocated_encoder_| immediately, because we need to release it after the
// RecreateWebRtcStream() call.
std::unique_ptr<webrtc::VideoEncoder> new_encoder;
if (force_encoder_allocation || !allocated_encoder_ ||
allocated_codec_ != codec_settings.codec) {
const webrtc::SdpVideoFormat format(codec_settings.codec.name,
codec_settings.codec.params);
//The encoder new_encoder is created here and eventually stored in the
//external_encoder_ member of VCMCodecDataBase. Its type is VideoEncoderWrapper,
//a wrapper around HardwareVideoEncoder, and it is created by
//VideoEncoderFactoryWrapper.
new_encoder = encoder_factory_->CreateVideoEncoder(format);
parameters_.config.encoder_settings.encoder = new_encoder.get();
const webrtc::VideoEncoderFactory::CodecInfo info =
encoder_factory_->QueryVideoEncoder(format);
parameters_.config.encoder_settings.full_overuse_time =
info.is_hardware_accelerated;
parameters_.config.encoder_settings.internal_source =
info.has_internal_source;
} else {
new_encoder = std::move(allocated_encoder_);
}
RecreateWebRtcStream();
....
The RecreateWebRtcStream function of WebRtcVideoChannel::WebRtcVideoSendStream does two main things:
1. Calls CreateVideoSendStream on the Call object to create a VideoSendStream.
2. Calls SetSource to set the frame source; this eventually calls VideoTrack's AddOrUpdateSink to register a sink, and that sink is a VideoStreamEncoder.
void WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream() {
...
webrtc::VideoSendStream::Config config = parameters_.config.Copy();
//The stream_ created here is actually a VideoSendStream.
stream_ = call_->CreateVideoSendStream(std::move(config),
parameters_.encoder_config.Copy());
parameters_.encoder_config.encoder_specific_settings = NULL;
//Call SetSource to set the frame source; this eventually calls VideoTrack's
//AddOrUpdateSink to register a sink, which is a VideoStreamEncoder. When a frame
//arrives it is delivered to the VideoStreamEncoder and from there to the encoder.
if (source_) {
stream_->SetSource(this, GetDegradationPreference());
}
...
Let's look at encoder initialization first, and then at how capture and encoding are hooked together.
The CreateVideoSendStream function of Call:
webrtc::VideoSendStream* Call::CreateVideoSendStream(
webrtc::VideoSendStream::Config config,
VideoEncoderConfig encoder_config) {
VideoSendStream* send_stream = new VideoSendStream(
num_cpu_cores_, module_process_thread_.get(), &worker_queue_,
call_stats_.get(), transport_send_.get(), bitrate_allocator_.get(),
video_send_delay_stats_.get(), event_log_, std::move(config),
std::move(encoder_config), suspended_video_send_ssrcs_,
suspended_video_payload_states_);
video_send_streams_.insert(send_stream);
The VideoSendStream constructor.
It mainly creates the VideoStreamEncoder object and the VideoSendStreamImpl object, then calls ReconfigureVideoEncoder to initialize the encoder.
The VideoSendStreamImpl object is created inside ConstructionTask::Run.
VideoSendStream::VideoSendStream(
int num_cpu_cores,
ProcessThread* module_process_thread,
rtc::TaskQueue* worker_queue,
CallStats* call_stats,
RtpTransportControllerSendInterface* transport,
BitrateAllocator* bitrate_allocator,
SendDelayStats* send_delay_stats,
RtcEventLog* event_log,
VideoSendStream::Config config,
VideoEncoderConfig encoder_config,
const std::map<uint32_t, RtpState>& suspended_ssrcs,
const std::map<uint32_t, RtpPayloadState>& suspended_payload_states)
video_stream_encoder_.reset(
new VideoStreamEncoder(num_cpu_cores, &stats_proxy_,
config_.encoder_settings,
config_.pre_encode_callback,
config_.post_encode_callback,
std::unique_ptr<OveruseFrameDetector>()));
worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(new ConstructionTask(
&send_stream_, &thread_sync_event_, &stats_proxy_,
video_stream_encoder_.get(), module_process_thread, call_stats, transport,
bitrate_allocator, send_delay_stats, event_log, &config_,
encoder_config.max_bitrate_bps, suspended_ssrcs, suspended_payload_states,
encoder_config.content_type)));
ReconfigureVideoEncoder(std::move(encoder_config));
The VideoStreamEncoder constructor:
VideoStreamEncoder::VideoStreamEncoder(
uint32_t number_of_cores,
SendStatisticsProxy* stats_proxy,
const VideoSendStream::Config::EncoderSettings& settings,
rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
EncodedFrameObserver* encoder_timing,
std::unique_ptr<OveruseFrameDetector> overuse_detector)
: shutdown_event_(true /* manual_reset */, false),
number_of_cores_(number_of_cores),
initial_rampup_(0),
//Mainly initializes the source_proxy_ and video_sender_ members.
source_proxy_(new VideoSourceProxy(this)),
sink_(nullptr),
settings_(settings),
codec_type_(PayloadStringToCodecType(settings.payload_name)),
video_sender_(Clock::GetRealTimeClock(), this),
The VideoSender constructor:
VideoSender::VideoSender(Clock* clock, EncodedImageCallback* post_encode_callback)
: _encoder(nullptr),
_mediaOpt(clock),
//Mainly initializes the _encodedFrameCallback and _codecDataBase members.
//post_encode_callback is a VideoStreamEncoder object.
//_encodedFrameCallback is a VCMEncodedFrameCallback.
_encodedFrameCallback(post_encode_callback, &_mediaOpt),
post_encode_callback_(post_encode_callback),
//_encodedFrameCallback (a VCMEncodedFrameCallback) holds the VideoStreamEncoder as a
//callback. This callback is later registered with the encoder to receive encoded frames.
_codecDataBase(&_encodedFrameCallback),
Back in the VideoStreamEncoder constructor:
VideoStreamEncoder::VideoStreamEncoder(
video_sender_.RegisterExternalEncoder(
settings_.encoder, settings_.payload_type,
settings_.internal_source);
// Register an external encoder object.
// This can not be used together with external decoder callbacks.
void VideoSender::RegisterExternalEncoder(
VideoEncoder* externalEncoder,
uint8_t payloadType,
bool internalSource /*= false*/) {
//The encoder object is ultimately stored in the external_encoder_ member of
//VCMCodecDataBase. externalEncoder is a VideoEncoderWrapper, a wrapper around
//HardwareVideoEncoder, created by VideoEncoderFactoryWrapper.
_codecDataBase.RegisterExternalEncoder(externalEncoder, payloadType,
internalSource);
Back in the VideoSendStream constructor, ReconfigureVideoEncoder(std::move(encoder_config)) is executed to initialize the encoder.
The ReconfigureVideoEncoder function of VideoSendStream:
void VideoSendStream::ReconfigureVideoEncoder(VideoEncoderConfig config) {
// TODO(perkj): Some test cases in VideoSendStreamTest call
// ReconfigureVideoEncoder from the network thread.
// RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DCHECK(content_type_ == config.content_type);
video_stream_encoder_->ConfigureEncoder(std::move(config),
config_.rtp.max_packet_size,
config_.rtp.nack.rtp_history_ms > 0);
}
The ConfigureEncoder function of VideoStreamEncoder:
void VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config,
size_t max_data_payload_length,
bool nack_enabled) {
encoder_queue_.PostTask(
std::unique_ptr<rtc::QueuedTask>(new ConfigureEncoderTask(
this, std::move(config), max_data_payload_length, nack_enabled)));
}
The ConfigureEncoderOnTaskQueue function of VideoStreamEncoder:
void VideoStreamEncoder::ConfigureEncoderOnTaskQueue(
VideoEncoderConfig config,
size_t max_data_payload_length,
bool nack_enabled) {
// Reconfigure the encoder now if the encoder has an internal source or
// if the frame resolution is known. Otherwise, the reconfiguration is
// deferred until the next frame to minimize the number of reconfigurations.
// The codec configuration depends on incoming video frame size.
if (last_frame_info_) {
ReconfigureEncoder();
} else if (settings_.internal_source) {
last_frame_info_ =
rtc::Optional<VideoFrameInfo>(VideoFrameInfo(176, 144, false));
ReconfigureEncoder();
}
The ReconfigureEncoder function of VideoStreamEncoder:
void VideoStreamEncoder::ReconfigureEncoder() {
bool success = video_sender_.RegisterSendCodec(
&codec, number_of_cores_,
static_cast<uint32_t>(max_data_payload_length_)) == VCM_OK;
The RegisterSendCodec function of VideoSender:
// Register the send codec to be used.
int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
uint32_t maxPayloadSize) {
bool ret =
_codecDataBase.SetSendCodec(sendCodec, numberOfCores, maxPayloadSize);
// Update encoder regardless of result to make sure that we're not holding on
// to a deleted instance.
_encoder = _codecDataBase.GetEncoder();
// Cache the current codec here so they can be fetched from this thread
// without requiring the _sendCritSect lock.
current_codec_ = *sendCodec;
The SetSendCodec function of VCMCodecDataBase does two things:
1. Creates ptr_encoder_ (a VCMGenericEncoder), passing in the previously created encoder and the callback that receives encoded data.
2. Calls the encoder's initialization function.
// Assuming only one registered encoder - since only one used, no need for more.
bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
int number_of_cores,
size_t max_payload_size) {
ptr_encoder_.reset(new VCMGenericEncoder(
external_encoder_, encoded_frame_callback_, internal_source_));
encoded_frame_callback_->SetInternalSource(internal_source_);
if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
max_payload_size_) < 0) {
RTC_LOG(LS_ERROR) << "Failed to initialize video encoder.";
DeleteEncoder();
return false;
The InitEncode function of VCMGenericEncoder:
int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t number_of_cores,
size_t max_payload_size) {
//The actual encoder initialization.
if (encoder_->InitEncode(settings, number_of_cores, max_payload_size) != 0) {
RTC_LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
"payload name: "
<< settings->plName;
return -1;
}
vcm_encoded_frame_callback_->Reset();
//Register the callback with the encoder; it is invoked once a frame has been encoded.
encoder_->RegisterEncodeCompleteCallback(vcm_encoded_frame_callback_);
At this point the encoder initialization flow is complete. We now return to RecreateWebRtcStream of WebRtcVideoChannel::WebRtcVideoSendStream to see how capture is hooked up to encoding.
The RecreateWebRtcStream function of WebRtcVideoChannel::WebRtcVideoSendStream:
void WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream() {
...
webrtc::VideoSendStream::Config config = parameters_.config.Copy();
//The stream_ created here is actually a VideoSendStream.
stream_ = call_->CreateVideoSendStream(std::move(config),
parameters_.encoder_config.Copy());
parameters_.encoder_config.encoder_specific_settings = NULL;
//Call SetSource to set the frame source; this eventually calls VideoTrack's
//AddOrUpdateSink to register a sink, which is a VideoStreamEncoder. When a frame
//arrives it is delivered to the VideoStreamEncoder and from there to the encoder.
if (source_) {
stream_->SetSource(this, GetDegradationPreference());
}
...
The SetSource function of VideoSendStream:
void VideoSendStream::SetSource(
rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
const DegradationPreference& degradation_preference) {
RTC_DCHECK_RUN_ON(&thread_checker_);
video_stream_encoder_->SetSource(source, degradation_preference);
}
The SetSource function of VideoStreamEncoder:
void VideoStreamEncoder::SetSource(
rtc::VideoSourceInterface<VideoFrame>* source,
const VideoSendStream::DegradationPreference& degradation_preference) {
source_proxy_->SetSource(source, degradation_preference);
The SetSource function of VideoStreamEncoder::VideoSourceProxy:
void VideoStreamEncoder::VideoSourceProxy::SetSource(
rtc::VideoSourceInterface<VideoFrame>* source,
const VideoSendStream::DegradationPreference& degradation_preference) {
// Called on libjingle's worker thread.
RTC_DCHECK_CALLED_SEQUENTIALLY(&main_checker_);
rtc::VideoSourceInterface<VideoFrame>* old_source = nullptr;
rtc::VideoSinkWants wants;
{
rtc::CritScope lock(&crit_);
degradation_preference_ = degradation_preference;
old_source = source_;
//Here source is a WebRtcVideoChannel::WebRtcVideoSendStream.
source_ = source;
wants = GetActiveSinkWantsInternal();
}
if (old_source != source && old_source != nullptr) {
old_source->RemoveSink(video_stream_encoder_);
}
if (!source) {
return;
}
source->AddOrUpdateSink(video_stream_encoder_, wants);
}
The AddOrUpdateSink function of WebRtcVideoChannel::WebRtcVideoSendStream:
void WebRtcVideoChannel::WebRtcVideoSendStream::AddOrUpdateSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
const rtc::VideoSinkWants& wants) {
//Here source_ is the upper-layer track; its actual type is VideoTrack.
source_->AddOrUpdateSink(encoder_sink_, wants);
The AddOrUpdateSink function of VideoTrack:
void VideoTrack::AddOrUpdateSink(
rtc::VideoSinkInterface<VideoFrame>* sink,
const rtc::VideoSinkWants& wants) {
RTC_DCHECK(worker_thread_->IsCurrent());
VideoSourceBase::AddOrUpdateSink(sink, wants);
rtc::VideoSinkWants modified_wants = wants;
modified_wants.black_frames = !enabled();
//video_source_ is actually an AndroidVideoTrackSource, initialized in the VideoTrack constructor.
video_source_->AddOrUpdateSink(sink, modified_wants);
}
AndroidVideoTrackSource inherits from AdaptedVideoTrackSource, so the call ends up in AdaptedVideoTrackSource::AddOrUpdateSink:
void AdaptedVideoTrackSource::AddOrUpdateSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
const rtc::VideoSinkWants& wants) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
//sink is actually the VideoStreamEncoder. This is the link back to the capture module's frame delivery.
broadcaster_.AddOrUpdateSink(sink, wants);
OnSinkWantsChanged(broadcaster_.wants());
}
The AddOrUpdateSink function of VideoBroadcaster:
void VideoBroadcaster::AddOrUpdateSink(
VideoSinkInterface<webrtc::VideoFrame>* sink,
const VideoSinkWants& wants) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(sink != nullptr);
rtc::CritScope cs(&sinks_and_wants_lock_);
//The derived class calls the base-class function of the same name.
VideoSourceBase::AddOrUpdateSink(sink, wants);
UpdateWants();
At this point we have seen how capture and encoding are connected. Next we look at how the encoder handles the data when a frame arrives.
Starting encoding
From the capture/encoding hookup above we know that when a frame arrives it enters VideoStreamEncoder::OnFrame:
void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) {
encoder_queue_.PostTask(std::unique_ptr<rtc::QueuedTask>(
new EncodeTask(incoming_frame, this, rtc::TimeMicros(), log_stats)));
bool VideoStreamEncoder::EncodeTask::Run() override {
video_stream_encoder_->EncodeVideoFrame(frame_, time_when_posted_us_);
The EncodeVideoFrame function of VideoStreamEncoder:
void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
int64_t time_when_posted_us) {
static int captureFrameCount = 0;
static double nextCaptureStatisticsTime = -1;
static double UNIT_TIME_INTERVAL = 1000;
static int capturePreprocessingFrameCount = 0;
static double nextCapturePreprocessingStatisticsTime = -1;
captureFrameCount++;
long currentTime = clock_->TimeInMicroseconds()/rtc::kNumMicrosecsPerMillisec;
if(nextCaptureStatisticsTime == -1) {
nextCaptureStatisticsTime = currentTime + UNIT_TIME_INTERVAL;
}
if(currentTime > nextCaptureStatisticsTime) {
RTC_LOG(LS_INFO) << "statistics VideoStreamEncoder capture frame count:" << captureFrameCount;
nextCaptureStatisticsTime = currentTime + UNIT_TIME_INTERVAL;
captureFrameCount = 0;
}
RTC_DCHECK_RUN_ON(&encoder_queue_);
if (pre_encode_callback_)
pre_encode_callback_->OnFrame(video_frame);
if (!last_frame_info_ || video_frame.width() != last_frame_info_->width ||
video_frame.height() != last_frame_info_->height ||
video_frame.is_texture() != last_frame_info_->is_texture) {
pending_encoder_reconfiguration_ = true;
last_frame_info_ = rtc::Optional<VideoFrameInfo>(VideoFrameInfo(
video_frame.width(), video_frame.height(), video_frame.is_texture()));
RTC_LOG(LS_INFO) << "Video frame parameters changed: dimensions="
<< last_frame_info_->width << "x"
<< last_frame_info_->height
<< ", texture=" << last_frame_info_->is_texture << ".";
}
if (initial_rampup_ < kMaxInitialFramedrop &&
video_frame.size() >
MaximumFrameSizeForBitrate(encoder_start_bitrate_bps_ / 1000)) {
RTC_LOG(LS_INFO) << "Dropping frame. Too large for target bitrate.";
AdaptDown(kQuality);
++initial_rampup_;
return;
}
initial_rampup_ = kMaxInitialFramedrop;
int64_t now_ms = clock_->TimeInMilliseconds();
if (pending_encoder_reconfiguration_) {
ReconfigureEncoder();
last_parameters_update_ms_.emplace(now_ms);
} else if (!last_parameters_update_ms_ ||
now_ms - *last_parameters_update_ms_ >=
vcm::VCMProcessTimer::kDefaultProcessIntervalMs) {
video_sender_.UpdateChannelParemeters(rate_allocator_.get(),
bitrate_observer_);
last_parameters_update_ms_.emplace(now_ms);
}
if (EncoderPaused()) {
TraceFrameDropStart();
return;
}
TraceFrameDropEnd();
VideoFrame out_frame(video_frame);
// Crop frame if needed.
if (crop_width_ > 0 || crop_height_ > 0) {
int cropped_width = video_frame.width() - crop_width_;
int cropped_height = video_frame.height() - crop_height_;
rtc::scoped_refptr<I420Buffer> cropped_buffer =
I420Buffer::Create(cropped_width, cropped_height);
// TODO(ilnik): Remove scaling if cropping is too big, as it should never
// happen after SinkWants signaled correctly from ReconfigureEncoder.
if (crop_width_ < 4 && crop_height_ < 4) {
cropped_buffer->CropAndScaleFrom(
*video_frame.video_frame_buffer()->ToI420(), crop_width_ / 2,
crop_height_ / 2, cropped_width, cropped_height);
} else {
cropped_buffer->ScaleFrom(
*video_frame.video_frame_buffer()->ToI420().get());
}
out_frame =
VideoFrame(cropped_buffer, video_frame.timestamp(),
video_frame.render_time_ms(), video_frame.rotation());
out_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
}
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", video_frame.render_time_ms(),
"Encode");
overuse_detector_->FrameCaptured(out_frame, time_when_posted_us);
capturePreprocessingFrameCount++;
long currentPreprocessingTime = clock_->TimeInMicroseconds()/rtc::kNumMicrosecsPerMillisec;
if(nextCapturePreprocessingStatisticsTime == -1) {
nextCapturePreprocessingStatisticsTime = currentPreprocessingTime + UNIT_TIME_INTERVAL;
}
if(currentPreprocessingTime > nextCapturePreprocessingStatisticsTime) {
RTC_LOG(LS_INFO) << "statistics VideoStreamEncoder preprocessing capture frame count:"
<< capturePreprocessingFrameCount;
nextCapturePreprocessingStatisticsTime = currentPreprocessingTime + UNIT_TIME_INTERVAL;
capturePreprocessingFrameCount = 0;
}
//Crop/scale first if necessary, then call VideoSender::AddVideoFrame.
//That function does a lot of work and deserves a closer look.
video_sender_.AddVideoFrame(out_frame, nullptr);
}
The AddVideoFrame function of VideoSender:
// Add one raw video frame to the encoder, blocking.
int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
const CodecSpecificInfo* codecSpecificInfo) {
int32_t ret =
_encoder->Encode(converted_frame, codecSpecificInfo, next_frame_types);
The Encode function of VCMGenericEncoder:
int32_t VCMGenericEncoder::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific,
const std::vector<FrameType>& frame_types) {
//From here, after passing through a few wrapper classes, the call eventually reaches
//the encode function of HardwareVideoEncoder.java. Once encoding completes, the
//registered callback is invoked and the encoded frame is handed upward for RTP
//packetization and sending.
return encoder_->Encode(frame, codec_specific, &frame_types);
At this point the overall WebRTC video capture and encoding flow is essentially complete. The details warrant deeper study for anyone interested; the walkthrough may contain inaccuracies here and there, but the general direction is correct. I was too lazy to draw a sequence diagram, so feel free to sketch one yourself. Also, if anyone knows how to print stack traces in the Android build of WebRTC, please let me know; I tried many approaches and never got it working.
Finally, the point of reading the WebRTC capture and encoding code is to enable detailed optimizations, better strategy choices, and even performance improvements on specific platforms. That is why I wrote this article: as a reference for my own future review, and to learn together with others.
Based on this understanding of the capture and encoding flow, plus research into some of the details, I made video-quality optimizations on the corresponding platform. See the next article, "WebRTC on Android: Improving Video Quality" (《webrtc之Android视频质量提升》).