
Convenience API for iterating frames from a participant (#396)
Convenience API for iterating frames
keepingitneil authored Sep 5, 2024
1 parent c478f98 commit 0fe6ae7
Showing 11 changed files with 482 additions and 51 deletions.
8 changes: 4 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default.

11 changes: 11 additions & 0 deletions livekit-ffi/protocol/audio_frame.proto
@@ -18,6 +18,7 @@ package livekit.proto;
option csharp_namespace = "LiveKit.Proto";

import "handle.proto";
import "track.proto";

// Create a new AudioStream
// AudioStream is used to receive audio frames from a track
@@ -29,6 +30,16 @@ message NewAudioStreamRequest {
}
message NewAudioStreamResponse { OwnedAudioStream stream = 1; }

message AudioStreamFromParticipantRequest {
uint64 participant_handle = 1;
AudioStreamType type = 2;
optional TrackSource track_source = 3;
uint32 sample_rate = 5;
uint32 num_channels = 6;
}

message AudioStreamFromParticipantResponse { OwnedAudioStream stream = 1; }

// Create a new AudioSource
message NewAudioSourceRequest {
AudioSourceType type = 1;
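For orientation, here is a minimal sketch of how a caller might populate the new audio request using the prost-generated struct added later in this commit. The module path `livekit_ffi::proto` and the enum variant names `AudioStreamType::AudioStreamNative` and `TrackSource::SourceMicrophone` are assumptions based on the existing protocol, not something this diff shows.

```rust
// Sketch only: module path and enum variant names are assumed, not taken from this diff.
use livekit_ffi::proto::{AudioStreamFromParticipantRequest, AudioStreamType, TrackSource};

fn audio_request(participant_handle: u64) -> AudioStreamFromParticipantRequest {
    AudioStreamFromParticipantRequest {
        participant_handle,
        // prost represents proto enums as i32 fields, hence the casts.
        r#type: AudioStreamType::AudioStreamNative as i32,
        // Field 3 is optional in the proto; here we ask for the microphone track.
        track_source: Some(TrackSource::SourceMicrophone as i32),
        sample_rate: 48_000,
        num_channels: 2,
    }
}
```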
29 changes: 16 additions & 13 deletions livekit-ffi/protocol/ffi.proto
@@ -82,15 +82,16 @@ message FfiRequest {
NewVideoSourceRequest new_video_source = 21;
CaptureVideoFrameRequest capture_video_frame = 22;
VideoConvertRequest video_convert = 23;
VideoStreamFromParticipantRequest video_stream_from_participant = 24;

// Audio
NewAudioStreamRequest new_audio_stream = 24;
NewAudioSourceRequest new_audio_source = 25;
CaptureAudioFrameRequest capture_audio_frame = 26;
NewAudioResamplerRequest new_audio_resampler = 27;
RemixAndResampleRequest remix_and_resample = 28;

E2eeRequest e2ee = 29;
NewAudioStreamRequest new_audio_stream = 25;
NewAudioSourceRequest new_audio_source = 26;
CaptureAudioFrameRequest capture_audio_frame = 27;
NewAudioResamplerRequest new_audio_resampler = 28;
RemixAndResampleRequest remix_and_resample = 29;
E2eeRequest e2ee = 30;
AudioStreamFromParticipantRequest audio_stream_from_participant = 31;
}
}

@@ -125,14 +126,16 @@ message FfiResponse {
NewVideoSourceResponse new_video_source = 21;
CaptureVideoFrameResponse capture_video_frame = 22;
VideoConvertResponse video_convert = 23;
VideoStreamFromParticipantResponse video_stream_from_participant = 24;

// Audio
NewAudioStreamResponse new_audio_stream = 24;
NewAudioSourceResponse new_audio_source = 25;
CaptureAudioFrameResponse capture_audio_frame = 26;
NewAudioResamplerResponse new_audio_resampler = 27;
RemixAndResampleResponse remix_and_resample = 28;
E2eeResponse e2ee = 29;
NewAudioStreamResponse new_audio_stream = 25;
NewAudioSourceResponse new_audio_source = 26;
CaptureAudioFrameResponse capture_audio_frame = 27;
NewAudioResamplerResponse new_audio_resampler = 28;
RemixAndResampleResponse remix_and_resample = 29;
AudioStreamFromParticipantResponse audio_stream_from_participant = 30;
E2eeResponse e2ee = 31;
}
}

12 changes: 12 additions & 0 deletions livekit-ffi/protocol/video_frame.proto
@@ -18,6 +18,7 @@ package livekit.proto;
option csharp_namespace = "LiveKit.Proto";

import "handle.proto";
import "track.proto";

// Create a new VideoStream
// VideoStream is used to receive video frames from a track
@@ -30,6 +31,17 @@ message NewVideoStreamRequest {
}
message NewVideoStreamResponse { OwnedVideoStream stream = 1; }

// Request a video stream from a participant
message VideoStreamFromParticipantRequest {
uint64 participant_handle = 1;
VideoStreamType type = 2;
TrackSource track_source = 3;
optional VideoBufferType format = 4;
bool normalize_stride = 5;
}

message VideoStreamFromParticipantResponse { OwnedVideoStream stream = 1;}

// Create a new VideoSource
// VideoSource is used to send video frame to a track
message NewVideoSourceRequest {
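As with the audio side, a hedged sketch of building the video request defined above; the `VideoStreamType`, `TrackSource`, and `VideoBufferType` variant names are assumed from the existing protocol.

```rust
// Sketch only: module path and enum variant names are assumptions.
use livekit_ffi::proto::{
    TrackSource, VideoBufferType, VideoStreamFromParticipantRequest, VideoStreamType,
};

fn video_request(participant_handle: u64) -> VideoStreamFromParticipantRequest {
    VideoStreamFromParticipantRequest {
        participant_handle,
        r#type: VideoStreamType::VideoStreamNative as i32,
        // Unlike the audio request, track_source is a required field here.
        track_source: TrackSource::SourceCamera as i32,
        // Optional output format (field 4); requesting RGBA in this sketch.
        format: Some(VideoBufferType::Rgba as i32),
        normalize_stride: true,
    }
}
```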
78 changes: 64 additions & 14 deletions livekit-ffi/src/livekit.proto.rs
@@ -1,4 +1,5 @@
// @generated
// This file is @generated by prost-build.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FrameCryptor {
@@ -1572,6 +1573,27 @@ pub struct NewVideoStreamResponse {
#[prost(message, optional, tag="1")]
pub stream: ::core::option::Option<OwnedVideoStream>,
}
/// Request a video stream from a participant
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VideoStreamFromParticipantRequest {
#[prost(uint64, tag="1")]
pub participant_handle: u64,
#[prost(enumeration="VideoStreamType", tag="2")]
pub r#type: i32,
#[prost(enumeration="TrackSource", tag="3")]
pub track_source: i32,
#[prost(enumeration="VideoBufferType", optional, tag="4")]
pub format: ::core::option::Option<i32>,
#[prost(bool, tag="5")]
pub normalize_stride: bool,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VideoStreamFromParticipantResponse {
#[prost(message, optional, tag="1")]
pub stream: ::core::option::Option<OwnedVideoStream>,
}
/// Create a new VideoSource
/// VideoSource is used to send video frame to a track
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -2891,6 +2913,26 @@ pub struct NewAudioStreamResponse {
#[prost(message, optional, tag="1")]
pub stream: ::core::option::Option<OwnedAudioStream>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioStreamFromParticipantRequest {
#[prost(uint64, tag="1")]
pub participant_handle: u64,
#[prost(enumeration="AudioStreamType", tag="2")]
pub r#type: i32,
#[prost(enumeration="TrackSource", optional, tag="3")]
pub track_source: ::core::option::Option<i32>,
#[prost(uint32, tag="5")]
pub sample_rate: u32,
#[prost(uint32, tag="6")]
pub num_channels: u32,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioStreamFromParticipantResponse {
#[prost(message, optional, tag="1")]
pub stream: ::core::option::Option<OwnedAudioStream>,
}
/// Create a new AudioSource
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -3162,7 +3204,7 @@ impl AudioSourceType {
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FfiRequest {
#[prost(oneof="ffi_request::Message", tags="2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29")]
#[prost(oneof="ffi_request::Message", tags="2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31")]
pub message: ::core::option::Option<ffi_request::Message>,
}
/// Nested message and enum types in `FfiRequest`.
@@ -3217,26 +3259,30 @@ pub mod ffi_request {
CaptureVideoFrame(super::CaptureVideoFrameRequest),
#[prost(message, tag="23")]
VideoConvert(super::VideoConvertRequest),
/// Audio
#[prost(message, tag="24")]
NewAudioStream(super::NewAudioStreamRequest),
VideoStreamFromParticipant(super::VideoStreamFromParticipantRequest),
/// Audio
#[prost(message, tag="25")]
NewAudioSource(super::NewAudioSourceRequest),
NewAudioStream(super::NewAudioStreamRequest),
#[prost(message, tag="26")]
CaptureAudioFrame(super::CaptureAudioFrameRequest),
NewAudioSource(super::NewAudioSourceRequest),
#[prost(message, tag="27")]
NewAudioResampler(super::NewAudioResamplerRequest),
CaptureAudioFrame(super::CaptureAudioFrameRequest),
#[prost(message, tag="28")]
RemixAndResample(super::RemixAndResampleRequest),
NewAudioResampler(super::NewAudioResamplerRequest),
#[prost(message, tag="29")]
RemixAndResample(super::RemixAndResampleRequest),
#[prost(message, tag="30")]
E2ee(super::E2eeRequest),
#[prost(message, tag="31")]
AudioStreamFromParticipant(super::AudioStreamFromParticipantRequest),
}
}
/// This is the output of livekit_ffi_request function.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FfiResponse {
#[prost(oneof="ffi_response::Message", tags="2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29")]
#[prost(oneof="ffi_response::Message", tags="2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31")]
pub message: ::core::option::Option<ffi_response::Message>,
}
/// Nested message and enum types in `FfiResponse`.
@@ -3291,18 +3337,22 @@ pub mod ffi_response {
CaptureVideoFrame(super::CaptureVideoFrameResponse),
#[prost(message, tag="23")]
VideoConvert(super::VideoConvertResponse),
/// Audio
#[prost(message, tag="24")]
NewAudioStream(super::NewAudioStreamResponse),
VideoStreamFromParticipant(super::VideoStreamFromParticipantResponse),
/// Audio
#[prost(message, tag="25")]
NewAudioSource(super::NewAudioSourceResponse),
NewAudioStream(super::NewAudioStreamResponse),
#[prost(message, tag="26")]
CaptureAudioFrame(super::CaptureAudioFrameResponse),
NewAudioSource(super::NewAudioSourceResponse),
#[prost(message, tag="27")]
NewAudioResampler(super::NewAudioResamplerResponse),
CaptureAudioFrame(super::CaptureAudioFrameResponse),
#[prost(message, tag="28")]
RemixAndResample(super::RemixAndResampleResponse),
NewAudioResampler(super::NewAudioResamplerResponse),
#[prost(message, tag="29")]
RemixAndResample(super::RemixAndResampleResponse),
#[prost(message, tag="30")]
AudioStreamFromParticipant(super::AudioStreamFromParticipantResponse),
#[prost(message, tag="31")]
E2ee(super::E2eeResponse),
}
}
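Tying the pieces together, a sketch (same assumptions as above) of wrapping the new request into the `FfiRequest` oneof and pulling the stream back out of the matching `FfiResponse` variant; this commit adds the variants under tags 31 and 30 respectively.

```rust
// Sketch only: shows how the new oneof variants fit together.
use livekit_ffi::proto::{
    ffi_request, ffi_response, AudioStreamFromParticipantRequest, FfiRequest, FfiResponse,
    OwnedAudioStream,
};

fn wrap(req: AudioStreamFromParticipantRequest) -> FfiRequest {
    FfiRequest {
        // ffi_request::Message tag 31.
        message: Some(ffi_request::Message::AudioStreamFromParticipant(req)),
    }
}

fn stream_from(res: FfiResponse) -> Option<OwnedAudioStream> {
    match res.message {
        // ffi_response::Message tag 30; `stream` is an optional OwnedAudioStream.
        Some(ffi_response::Message::AudioStreamFromParticipant(r)) => r.stream,
        _ => None,
    }
}
```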
