Sidya 2023-06-10 02:57:36 -04:00
parent 13cde58ffb
commit 4fbd56d13a
12 changed files with 1280 additions and 0 deletions

19
.gitignore vendored Normal file

@@ -0,0 +1,19 @@
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
# IDE related files
gd-extension-rs-ffmepg.iml
.idea/*

15
Cargo.toml Normal file

@@ -0,0 +1,15 @@
[package]
name = "gd-extension-rs-ffmepg"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[dependencies]
libc = "0.2.144"
anyhow = "1.0.71"
rsmpeg = "0.14.1"
godot = { git = "https://github.com/godot-rust/gdext", branch = "master" }
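
Worth noting for anyone building this: Godot only loads a cdylib like the one declared above through a `.gdextension` descriptor inside the Godot project. A minimal sketch, assuming gdext's default entry symbol and a sibling target/ build directory (the paths are illustrative, not part of this commit):

    [configuration]
    entry_symbol = "gdext_rust_init"

    [libraries]
    linux.debug.x86_64 = "res://../target/debug/libgd_extension_rs_ffmepg.so"
    windows.debug.x86_64 = "res://../target/debug/gd_extension_rs_ffmepg.dll"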

101
src/lib.rs Normal file

@@ -0,0 +1,101 @@
use godot::prelude::*;
use godot::engine::{Node, NodeVirtual, Texture};
use crate::metadata::VideoMetadata;
mod metadata;
mod playback;
struct Main;
#[gdextension]
unsafe impl ExtensionLibrary for Main {}
/// A video player node.
#[derive(GodotClass)]
#[class(base=Node)]
struct VideoPlayer {
metadata : Option<VideoMetadata>,
#[export(get = get_has_video)]
has_video: bool,
#[export(get = get_has_audio)]
has_audio: bool,
#[export(get = get_duration)]
duration: i64,
#[base]
base: Base<Node>
}
#[godot_api]
impl NodeVirtual for VideoPlayer {
fn init(base: Base<Node>) -> Self {
Self {
metadata: None,
has_video: false,
has_audio: false,
duration: -1,
base
}
}
}
#[godot_api]
impl VideoPlayer {
/// Load a video file from `path`.
/// Returns `true` if the file was loaded successfully, `false` otherwise.
#[func]
fn load_file(&mut self, path: GodotString) -> bool {
    if let Ok(m) = VideoMetadata::get_metadata(path.to_string().as_str()) {
        self.metadata = Some(m);
        return true;
    }
    false
}
/// Return the FFmpeg version string
#[func]
fn ffmpeg_version(&mut self) -> GodotString {
VideoMetadata::get_version().into()
}
/// Return the FFmpeg license string
#[func]
fn ffmpeg_license(&mut self) -> GodotString {
VideoMetadata::get_license().into()
}
/// Return true if an audio stream is loaded (audio decoding is not implemented yet, so this always returns false)
#[func]
fn get_has_audio(&self) -> bool {
false
}
/// Return true if a video stream is loaded
#[func]
fn get_has_video(&self) -> bool {
    self.metadata.is_some()
}
/// Return the duration of the video in microseconds, or -1 if no video is loaded
#[func]
fn get_duration(&self) -> i64 {
    self.metadata.as_ref().map_or(-1, |m| m.duration)
}
/// Return the current video texture (not implemented yet)
#[func]
fn get_texture(&self) -> Gd<Texture> {
    todo!()
}
}

105
src/metadata.rs Normal file

@@ -0,0 +1,105 @@
use std::ffi::{c_char, CStr, CString};
use rsmpeg::avcodec::AVCodecContext;
use rsmpeg::avformat::AVFormatContextInput;
use rsmpeg::avutil::av_q2d;
use rsmpeg::ffi;
use anyhow::Context;
use rsmpeg::ffi::{av_version_info, avutil_license};
pub struct VideoMetadata {
pub duration : i64,
pub frame_rate: f64,
pub width: i32,
pub height: i32
}
impl VideoMetadata {
pub fn get_metadata(path: &str) -> anyhow::Result<VideoMetadata> {
let path = &CString::new(path)?;
let input_format_context = AVFormatContextInput::open(path)?;
let duration = input_format_context.duration;
let (video_stream_index, decoder) = input_format_context
.find_best_stream(ffi::AVMediaType_AVMEDIA_TYPE_VIDEO)?
.context("Failed to find video stream")?;
let video_stream = input_format_context
.streams()
.get(video_stream_index)
.unwrap();
let frame_rate = av_q2d(video_stream.r_frame_rate);
// Get `width` and `height` from `decode_context`
let mut decode_context = AVCodecContext::new(&decoder);
decode_context.apply_codecpar(&video_stream.codecpar())?;
decode_context.open(None)?;
let width = decode_context.width;
let height = decode_context.height;
// Probe for an audio stream without failing: audio is optional and not used yet,
// and a hard error here would break video-only files.
let _audio = input_format_context
    .find_best_stream(ffi::AVMediaType_AVMEDIA_TYPE_AUDIO)?;
Ok(VideoMetadata{
duration,
frame_rate,
width,
height,
})
}
pub fn get_version() -> String {
let c_buf: *const c_char = unsafe { av_version_info() };
let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
let str_slice: &str = c_str.to_str().unwrap();
str_slice.to_owned()
}
pub fn get_license() -> String {
let c_buf: *const c_char = unsafe { avutil_license() };
let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
let str_slice: &str = c_str.to_str().unwrap();
str_slice.to_owned()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_metadata_video_and_audio_file() {
let metadata = VideoMetadata::get_metadata("tests/assets/video1.mp4").unwrap();
assert_eq!(metadata.duration, 25406667);
assert_eq!(metadata.frame_rate, 29.97002997002997);
assert_eq!(metadata.width, 1920);
assert_eq!(metadata.height, 1080);
}
#[test]
#[ignore = "TODO: needs a video-only test asset"]
fn test_get_metadata_video_only_file() {
    todo!()
}
#[test]
#[ignore = "TODO: needs an audio-only test asset"]
fn test_get_metadata_audio_only_file() {
    todo!()
}
#[test]
#[ignore = "TODO: decide which error to expect for invalid input"]
fn test_get_metadata_invalid_file() {
    todo!()
}
#[test]
fn test_get_version() {
assert!(!VideoMetadata::get_version().is_empty());
}
#[test]
fn test_get_license() {
assert!(!VideoMetadata::get_license().is_empty());
}
}
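
Since `duration` comes straight from the AVFormatContext, it is expressed in AV_TIME_BASE units, i.e. microseconds; converting it for display is a one-liner. A minimal sketch, assuming rsmpeg re-exposes libavutil's AV_TIME_BASE constant through `ffi`:

    use rsmpeg::ffi::AV_TIME_BASE;

    /// Convert an AV_TIME_BASE-unit (microsecond) duration to seconds.
    fn duration_seconds(duration_us: i64) -> f64 {
        duration_us as f64 / AV_TIME_BASE as f64
    }

For the test asset above, 25406667 µs comes out to roughly 25.4 s.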

49
src/playback/constant.rs Normal file

@@ -0,0 +1,49 @@
use crate::ffmax;
pub const MAX_QUEUE_SIZE: usize = 15 * 1024 * 1024;
pub const MIN_FRAMES: usize = 25;
pub const EXTERNAL_CLOCK_MIN_FRAMES: usize = 2;
pub const EXTERNAL_CLOCK_MAX_FRAMES: usize = 10;
/* Minimum SDL audio buffer size, in samples. */
pub const SDL_AUDIO_MIN_BUFFER_SIZE: usize = 512;
/* Calculate actual buffer size, keeping in mind that it must not cause too frequent audio callbacks */
pub const SDL_AUDIO_MAX_CALLBACKS_PER_SEC: usize = 30;
/* Step size for volume control in dB */
pub const SDL_VOLUME_STEP: f32 = 0.75;
/* no AV sync correction is done if below the minimum AV sync threshold */
pub const AV_SYNC_THRESHOLD_MIN: f32 = 0.04;
/* AV sync correction is done if above the maximum AV sync threshold */
pub const AV_SYNC_THRESHOLD_MAX: f32 = 0.1;
/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
pub const AV_SYNC_FRAMEDUP_THRESHOLD: f32 = 0.1;
/* no AV correction is done if the error is too big */
pub const AV_NOSYNC_THRESHOLD: f32 = 10.0;
/* maximum audio speed change to get correct sync */
pub const SAMPLE_CORRECTION_PERCENT_MAX: usize = 10;
/* external clock speed adjustment constants for realtime sources based on buffer fullness */
pub const EXTERNAL_CLOCK_SPEED_MIN: f32 = 0.900;
pub const EXTERNAL_CLOCK_SPEED_MAX: f32 = 1.010;
pub const EXTERNAL_CLOCK_SPEED_STEP: f32 = 0.001;
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
pub const AUDIO_DIFF_AVG_NB: usize = 20;
/* polls for possible required screen refresh at least this often, should be less than 1/fps */
pub const REFRESH_RATE: f32 = 0.01;
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
/* TODO: We assume that a decoded and resampled frame fits into this buffer */
pub const SAMPLE_ARRAY_SIZE: usize = 8 * 65536;
pub const CURSOR_HIDE_DELAY: usize = 1000000;
pub const USE_ONEPASS_SUBTITLE_RENDER: bool = true;
pub const VIDEO_PICTURE_QUEUE_SIZE: usize = 3;
pub const SUBPICTURE_QUEUE_SIZE: usize = 16;
pub const SAMPLE_QUEUE_SIZE: usize = 9;
// ffplay's FRAME_QUEUE_SIZE = FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE)), i.e. 16
pub const FRAME_QUEUE_SIZE: usize = ffmax!(SAMPLE_QUEUE_SIZE, ffmax!(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE));

180
src/playback/decoder.rs Normal file

@@ -0,0 +1,180 @@
use std::ptr::null_mut;
use std::sync::Condvar;
use std::thread::ThreadId;
use libc::EAGAIN;
use rsmpeg::avcodec::{AVCodecContext, AVPacket};
use rsmpeg::avutil::{av_rescale_q, AVFrame};
use rsmpeg::error::RsmpegError;
use rsmpeg::error::RsmpegError::AVError;
use rsmpeg::ffi::{AV_NOPTS_VALUE, AVERROR_EOF};
use crate::playback::DECODER_REORDER_PTS;
use crate::playback::packet::MutexPacketQueue;
use rsmpeg::ffi::{ AVRational, avcodec_flush_buffers };
pub struct Decoder {
pkt : Box<AVPacket>,
queue : Box<MutexPacketQueue>,
avctx : Box<AVCodecContext>,
pkt_serial : Box<i64>,
finished: i64,
packet_pending: bool,
empty_queue_cond: Box<Condvar>,
start_pts : i64,
start_pts_tb : AVRational,
next_pts : i64,
next_pts_tb: AVRational,
decoder_tid: Box<ThreadId>,
}
impl Decoder {
fn new(avctx: AVCodecContext, queue: MutexPacketQueue, tid: ThreadId) -> Self {
Decoder {
pkt: Box::new(Default::default()),
queue: Box::new(queue),
avctx: Box::new(avctx),
pkt_serial: Box::new(0),
finished: 0,
packet_pending: false,
empty_queue_cond: Box::new(Default::default()),
start_pts: 0,
start_pts_tb: AVRational {num: 1, den : 1},
next_pts: 0,
next_pts_tb: AVRational {num : 1, den : 1},
decoder_tid: Box::new(tid),
}
}
pub fn decode_frame(&mut self, av_frame: &mut AVFrame, sub: &mut rsmpeg::ffi::AVSubtitle) -> Result<(), RsmpegError> { // TODO Recheck
let mut ret = Err(AVError(EAGAIN));
loop {
if self.queue.data.serial == *self.pkt_serial {
loop {
if self.queue.data.abort_request {
return Err(AVError(-1));
}
let codec_type = self.avctx.extract_codecpar().codec_type();
ret = if codec_type.is_video() {
let ret_receive = self.avctx.receive_frame();
if let Ok(mut frame) = ret_receive {
if !DECODER_REORDER_PTS {
    // ffplay's default path: trust the decoder's best-effort timestamp.
    frame.set_pts(frame.best_effort_timestamp);
} else {
    frame.set_pts(frame.pkt_dts);
}
*av_frame = frame;
Ok(())
} else {
Err(ret_receive.unwrap_err())
}
} else if codec_type.is_audio() {
let ret_receive = self.avctx.receive_frame();
if let Ok(mut frame) = ret_receive {
let tb = AVRational {num: 1, den: frame.sample_rate};
if frame.pts != AV_NOPTS_VALUE {
frame.set_pts(av_rescale_q(frame.pts, self.avctx.pkt_timebase, tb))
} else if self.next_pts != AV_NOPTS_VALUE {
frame.set_pts(av_rescale_q(self.next_pts, self.next_pts_tb, tb));
}
if frame.pts != AV_NOPTS_VALUE {
self.next_pts = frame.pts + frame.nb_samples as i64;
self.next_pts_tb = tb;
}
*av_frame = frame;
Ok(())
} else {
Err(ret_receive.unwrap_err())
}
} else {
ret
};
if Some(&AVError(AVERROR_EOF)) == ret.as_ref().err() {
    self.finished = *self.pkt_serial;
    unsafe { avcodec_flush_buffers(self.avctx.as_mut_ptr()) };
    return Ok(()); // TODO
}
// A frame was received: hand it back (ffplay returns 1, i.e. success, here).
if ret.is_ok() {
    return Ok(());
}
if Some(&AVError(EAGAIN)) == ret.as_ref().err() {
break;
}
}
}
loop {
{
let mut pq = self.queue.mutex.lock().unwrap();
while self.queue.data.queue.is_empty() {
pq = self.empty_queue_cond.wait(pq).unwrap();
}
}
if ! self.packet_pending {
let old_serial = *self.pkt_serial;
// if packet_queue_get(&d.queue, &mut d.pkt, true, Some(&mut d.pkt_serial)) < 0 {// TODO
if let Some(AVError(x)) = &mut self.queue.get( &mut self.pkt, true, Some(&mut self.pkt_serial)).err(){// TODO
if (*x as i32) < 0 {
return Err(AVError(-1));
}
}
if old_serial != *self.pkt_serial {
unsafe { avcodec_flush_buffers(self.avctx.as_mut_ptr())};
self.finished = 0;
self.next_pts = self.start_pts;
self.next_pts_tb = self.start_pts_tb;
}
}
{
if self.queue.data.serial == *self.pkt_serial {
break;
}
}
//
// TODO av_packet_unref(d->pkt);
}
if self.avctx.extract_codecpar().codec_type().is_subtitle() {
let mut ret = self.avctx.decode_subtitle(Some(&mut *self.pkt)); // TODO no return ?
if ret.is_err() {
ret = Err(AVError(EAGAIN));
} else {
if ret.as_ref().unwrap().is_some() && self.pkt.data != null_mut() {
// *sub = *ret.unwrap().unwrap(); FIXME
self.packet_pending = true;
}
ret = if ret.as_ref().unwrap().is_some() { Ok(None) } else {
if self.pkt.data != null_mut() {
Err(AVError(EAGAIN))
} else {
Err(AVError(AVERROR_EOF))
}
};
//av_packet_unref(d->pkt);
}
} else {
if self.avctx.send_packet(Some(&self.pkt)).err() == Some(AVError(EAGAIN)) {
//av_log(&d.avctx, AV_LOG_ERROR,
// "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
self.packet_pending = true;
} else {
//av_packet_unref(d->pkt);
}
}
}
}
}
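
The loop in decode_frame is a direct port of ffplay's decoder_decode_frame(): drain receive_frame() until the decoder wants input, then pop one packet off the queue and send_packet() it, with serial checks deciding when to flush. Stripped of the queue/serial bookkeeping, the send/receive core reduces to the sketch below; `pump` is a hypothetical helper, not part of this commit, and the error variants follow rsmpeg's documented mapping of EAGAIN/EOF:

    use rsmpeg::avcodec::{AVCodecContext, AVPacket};
    use rsmpeg::avutil::AVFrame;
    use rsmpeg::error::RsmpegError;

    // Drain every pending frame, then feed the next packet.
    fn pump(avctx: &mut AVCodecContext, pkt: &AVPacket) -> Result<Vec<AVFrame>, RsmpegError> {
        let mut frames = Vec::new();
        loop {
            match avctx.receive_frame() {
                Ok(frame) => frames.push(frame),
                // Decoder wants more input, or was flushed: stop draining.
                Err(RsmpegError::DecoderDrainError) | Err(RsmpegError::DecoderFlushedError) => break,
                Err(e) => return Err(e),
            }
        }
        avctx.send_packet(Some(pkt))?;
        Ok(frames)
    }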

91
src/playback/frame.rs Normal file

@@ -0,0 +1,91 @@
use rsmpeg::avcodec::AVSubtitle;
use rsmpeg::avutil::AVFrame;
use crate::playback::packet::PacketQueue;
use rsmpeg::ffi::AVRational;
use crate::playback::constant::FRAME_QUEUE_SIZE;
use crate::playback::mutex::{MutexQueue, New};
pub struct FrameData {
pub pkt_pos: i64,
}
/* Common struct for handling all types of decoded data and allocated render buffers. */
pub struct Frame {
pub frame: Box<AVFrame>,
pub sub: Option<Box<AVSubtitle>>,
pub serial: i64,
pub pts: f32, /* presentation timestamp for the frame */
pub duration: f32, /* estimated duration of the frame */
pub pos: i64, /* byte position of the frame in the input file */
pub width: i64,
pub height : i64,
pub format : i64,
pub sar : AVRational,
pub uploaded : bool,
pub flip_v : bool,
}
pub type MutexFrameQueue = MutexQueue<FrameQueue>;
pub struct FrameQueue {
pub queue : [Option<Frame>; FRAME_QUEUE_SIZE],
pub rindex : usize,
pub windex: usize,
pub size : usize,
pub max_size: usize,
pub keep_last: i64,
pub rindex_shown: usize,
pub pktq : Box<PacketQueue>,
}
impl New for FrameQueue {
fn new() -> Self {
FrameQueue {
queue: Default::default(),
rindex: 0,
windex: 0,
size: 0,
max_size: 0,
keep_last: 0,
rindex_shown: 0,
pktq: Box::new(PacketQueue::new()),
}
}
}
impl MutexFrameQueue {
pub fn signal(&self) {
let _unused = self.mutex.lock().unwrap();
self.cond.notify_one();
}
pub fn peek(&self) -> &Option<Frame> {
    &self.data.queue[(self.data.rindex + self.data.rindex_shown) % self.data.max_size]
}
pub fn peek_next(&self) -> &Option<Frame> {
    &self.data.queue[(self.data.rindex + self.data.rindex_shown + 1) % self.data.max_size]
}
pub fn peek_last(&self) -> &Option<Frame> {
    &self.data.queue[self.data.rindex]
}
pub fn peek_writable(&mut self) -> &Option<Frame> {
/* wait until we have space to put a new frame */
let mut m = self.mutex.lock().unwrap();
let pq = &mut self.data;
while pq.size >= pq.max_size && !pq.pktq.abort_request {
m = self.cond.wait(m).unwrap();
}
if pq.pktq.abort_request {
return &None;
}
&pq.queue[pq.windex]
}
}
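
peek_writable covers the producer's wait; the matching advance operations (ffplay's frame_queue_push and frame_queue_next) are not ported yet. A sketch of what they reduce to, assuming the same locking discipline as peek_writable above (the method names are placeholders for the unported half):

    impl MutexFrameQueue {
        pub fn push(&mut self) {
            let pq = &mut self.data;
            pq.windex = (pq.windex + 1) % pq.max_size;
            let _m = self.mutex.lock().unwrap();
            pq.size += 1;
            self.cond.notify_one();
        }
        pub fn next(&mut self) {
            let pq = &mut self.data;
            // With keep_last set, the first call only marks rindex as shown.
            if pq.keep_last != 0 && pq.rindex_shown == 0 {
                pq.rindex_shown = 1;
                return;
            }
            pq.queue[pq.rindex] = None; // release the displayed frame
            pq.rindex = (pq.rindex + 1) % pq.max_size;
            let _m = self.mutex.lock().unwrap();
            pq.size -= 1;
            self.cond.notify_one();
        }
    }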

500
src/playback/mod.rs Normal file

@@ -0,0 +1,500 @@
use std::ffi::c_int;
use std::io::Write;
use std::ptr::{null_mut};
use std::sync::{Condvar};
use std::thread::ThreadId;
use godot::engine::{Image, ImageTexture, Texture};
use godot::engine::image::Format;
use godot::prelude::{GdRef, real, Rect2};
use libc::EAGAIN;
use rsmpeg::avcodec::{AVCodecContext, AVPacket};
use rsmpeg::avformat::{AVInputFormat, AVStream};
use rsmpeg::avutil::{av_cmp_q, av_make_q, av_mul_q, av_rescale_q, AVFrame, AVSampleFormat, AVPixelFormat, AVImage};
use rsmpeg::error::RsmpegError;
use rsmpeg::error::RsmpegError::AVError;
use rsmpeg::swresample::SwrContext;
use rsmpeg::ffi::{AV_NOPTS_VALUE, av_image_alloc, sws_scale, av_get_pix_fmt_name, AVChannelLayout, avcodec_flush_buffers, av_image_get_buffer_size, AVERROR_EOF, AVFormatContext, AVRational, av_rescale, AVSubtitleRect, sws_getCachedContext, AV_LOG_FATAL, av_log};
use rsmpeg::ffi::{AVPixelFormat_AV_PIX_FMT_RGB8, AVPixelFormat_AV_PIX_FMT_RGBA, AVPixelFormat_AV_PIX_FMT_RGB24, AVPixelFormat_AV_PIX_FMT_RGB0};
use rsmpeg::swscale::SwsContext;
use crate::playback::decoder::Decoder;
use crate::playback::frame::{Frame, FrameQueue, MutexFrameQueue};
use crate::playback::packet::{MutexPacketQueue, PacketQueue};
#[macro_export]
macro_rules! ffmax {
($a:expr, $b:expr) => { if $a > $b { $a } else { $b } };
}
mod constant;
mod decoder;
mod frame;
mod mutex;
mod packet;
struct AudioParams {
freq : i64,
ch_layout : AVChannelLayout,
fmt : AVSampleFormat,
frame_size: i64,
bytes_per_sec: i64,
}
struct Clock {
pts : f32, /* clock base */
pts_drift: f32, /* clock base minus time at which we updated the clock */
last_updated: f32,
speed: f32,
serial : i64 , /* clock is based on a packet with this serial */
paused : bool,
queue_serial: *const i64, /* pointer to the current packet queue serial, used for obsolete clock detection */
}
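// The drift representation lets a reader derive "now" on the clock without
// holding a lock: ffplay's get_clock()/set_clock_at() pair reduces to the
// sketch below. `now` is a monotonic timestamp in seconds; the method names
// are illustrative, nothing calls them yet.
impl Clock {
    fn set_at(&mut self, pts: f32, serial: i64, now: f32) {
        self.pts = pts;
        self.last_updated = now;
        self.pts_drift = pts - now;
        self.serial = serial;
    }
    fn get(&self, now: f32) -> f32 {
        if self.paused {
            self.pts
        } else {
            // Advance the stored pts by the elapsed wall time, scaled by `speed`.
            self.pts_drift + now - (now - self.last_updated) * (1.0 - self.speed)
        }
    }
}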
enum AvSyncMode {
AV_SYNC_AUDIO_MASTER, /* default choice */
AV_SYNC_VIDEO_MASTER,
AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
}
struct VideoState {
read_tid: Box<ThreadId>,
iformat: Box<AVInputFormat>,
abort_request : bool,
force_refresh : bool,
paused: bool,
last_paused : i64,
queue_attachments_req: i64,
seek_req : i64,
seek_flags : i64,
seek_pos: i64,
seek_rel : i64,
read_pause_return : i64,
ic : Box<AVFormatContext>,
realtime: i64,
audclk : Clock,
vidclk : Clock,
extclk : Clock,
pictq : MutexFrameQueue,
subpq : MutexFrameQueue,
sampq : MutexFrameQueue,
auddec : Decoder,
viddec : Decoder,
subdec : Decoder,
audio_stream: i64,
av_sync_type: i64,
audio_clock: f32,
audio_clock_serial : i64,
audio_diff_cum : f32, /* used for AV difference average computation */
audio_diff_avg_coef : f32,
audio_diff_threshold: f32,
audio_diff_avg_count: i64,
audio_st: *const AVStream,
audioq : PacketQueue,
audio_hw_buf_size: i64,
audio_buf: *const i8,
audio_buf1 : *const i8,
audio_buf_size : usize, /* in bytes */
audio_buf1_size : usize,
audio_buf_index: i64, /* in bytes */
audio_write_buf_size : i64,
audio_volume : i64,
muted : bool,
audio_src : AudioParams,
audio_filter_src: AudioParams,
audio_tgt : AudioParams,
swr_ctx: *const SwrContext,
frame_drops_early : bool,
frame_drops_late : bool,
subtitle_stream: usize,
subtitle_st :Option<Box<AVStream>>,
subtitleq: PacketQueue,
width: usize,
height: usize,
xleft: usize,
ytop: usize,
img_convert_ctx: Box<SwsContext>,
}
enum EShowMode {
    SHOW_MODE_NONE = -1,
    SHOW_MODE_VIDEO = 0,
    SHOW_MODE_WAVES,
    SHOW_MODE_RDFT,
    SHOW_MODE_NB,
}
// static sample_array: [i16 ; SAMPLE_ARRAY_SIZE as usize] = [];
// int sample_array_index;
// int last_i_start;
// RDFTContext *rdft;
// int rdft_bits;
// FFTSample *rdft_data;
// int xpos;
// double last_vis_time;
// SDL_Texture *vis_texture;
// SDL_Texture *sub_texture;
// SDL_Texture *vid_texture;
//
// int subtitle_stream;
// AVStream *subtitle_st;
// PacketQueue subtitleq;
//
// double frame_timer;
// double frame_last_returned_time;
// double frame_last_filter_delay;
// int video_stream;
// AVStream *video_st;
// PacketQueue videoq;
// double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
// struct SwsContext *sub_convert_ctx;
// int eof;
//
// char *filename;
// int width, height, xleft, ytop;
// int step;
//
// int vfilter_idx;
// AVFilterContext *in_video_filter; // the first filter in the video chain
// AVFilterContext *out_video_filter; // the last filter in the video chain
// AVFilterContext *in_audio_filter; // the first filter in the audio chain
// AVFilterContext *out_audio_filter; // the last filter in the audio chain
// AVFilterGraph *agraph; // audio filter graph
//
// int last_video_stream, last_audio_stream, last_subtitle_stream;
//
// SDL_cond *continue_read_thread;
// } VideoState;
//
// /* options specified by the user */
// static const AVInputFormat *file_iformat;
// static const char *input_filename;
// static const char *window_title;
// static int default_width = 640;
// static int default_height = 480;
// static int screen_width = 0;
// static int screen_height = 0;
// static int screen_left = SDL_WINDOWPOS_CENTERED;
// static int screen_top = SDL_WINDOWPOS_CENTERED;
// static int audio_disable;
// static int video_disable;
// static int subtitle_disable;
// static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
// static int seek_by_bytes = -1;
// static float seek_interval = 10;
// static int display_disable;
// static int borderless;
// static int alwaysontop;
// static int startup_volume = 100;
// static int show_status = -1;
// static int av_sync_type = AV_SYNC_AUDIO_MASTER;
// static int64_t start_time = AV_NOPTS_VALUE;
// static int64_t duration = AV_NOPTS_VALUE;
// static int fast = 0;
// static int genpts = 0;
// static int lowres = 0;
static DECODER_REORDER_PTS: bool = false;
// static int autoexit;
// static int exit_on_keydown;
// static int exit_on_mousedown;
// static int loop = 1;
// static int framedrop = -1;
// static int infinite_buffer = -1;
// static enum ShowMode show_mode = SHOW_MODE_NONE;
// static const char *audio_codec_name;
// static const char *subtitle_codec_name;
// static const char *video_codec_name;
// double rdftspeed = 0.02;
// static int64_t cursor_last_shown;
// static int cursor_hidden = 0;
// static const char **vfilters_list = NULL;
// static int nb_vfilters = 0;
// static char *afilters = NULL;
// static int autorotate = 1;
// static int find_stream_info = 1;
// static int filter_nbthreads = 0;
fn calculate_display_rect(
rect: &mut Rect2,
scr_xleft: i64,
scr_ytop: i64,
scr_width: i64,
scr_height: i64,
pic_width: i64,
pic_height: i64,
pic_sar: AVRational,
) {
let mut aspect_ratio = pic_sar;
if av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0 {
    aspect_ratio = av_make_q(1, 1);
}
aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width as c_int, pic_height as c_int));
// XXX: we suppose the screen has a 1.0 pixel ratio
// `& !1` rounds down to an even dimension, matching ffplay.
let mut height = scr_height;
let mut width = unsafe { av_rescale(height, aspect_ratio.num.into(), aspect_ratio.den.into()) & !1 };
if width > scr_width {
    width = scr_width;
    height = unsafe { av_rescale(width, aspect_ratio.den.into(), aspect_ratio.num.into()) & !1 };
}
let x = (scr_width - width) / 2;
let y = (scr_height - height) / 2;
rect.position.x = (scr_xleft + x) as real;
rect.position.y = (scr_ytop + y) as real;
rect.size.x = ffmax!(width as f32, 1.0);
rect.size.y = ffmax!(height as f32, 1.0);
}
struct VideoStreamPlayback {
video_state : VideoState,
vid_texture : Box<ImageTexture>,
image_buffer : Vec<u8>,
transfer_frame: Option<Box<AVFrame>>,
sws_flag: usize,
}
impl VideoStreamPlayback {
fn video_image_display(&mut self) {
let mut vp = self.video_state.pictq.peek_last().as_mut().unwrap(); // FIXME: peek_last returns &Option<Frame>, which cannot yield &mut; it needs a &mut self variant (or interior mutability) before the uploaded/flip_v writes below compile
let mut sp = self.video_state.subpq.peek();
let mut rect: Rect2 = Rect2::default(); //Fixme
// if self.subtitle_st.is_some() {
// if let Some(sp) = sp {
// if vp.pts >= (*sp).pts + ((*sp).sub.unwrap().start_display_time as f32 / 1000.0) { // FIXME
// if !(*sp).uploaded {
// let mut pixels: [*mut u8; 4] = [std::ptr::null_mut(); 4];
// let mut pitch: [i32; 4] = [0; 4];
// let mut i: i32;
// if sp.width == 0 || sp.height == 0 {
// sp.width = vp.width;
// sp.height = vp.height;
// }
//
// // if (realloc_texture(&vstate->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
// // return;
//
// for i in 0..sp.sub.unwrap().num_rects { // FIXME
// let sub_rect: *mut AVSubtitleRect = sp.sub.unwrap().rects[i as usize]; // FIXME
//
// unsafe {
//
// (*sub_rect).x = av_clip((*sub_rect).x, 0, sp.width);
// (*sub_rect).y = av_clip((*sub_rect).y, 0, sp.height);
// (*sub_rect).w = av_clip((*sub_rect).w, 0, sp.width - (*sub_rect).x);
// (*sub_rect).h = av_clip((*sub_rect).h, 0, sp.height - (*sub_rect).y);
//
// self.sub_convert_ctx = sws_getCachedContext(
// self.sub_convert_ctx,
// (*sub_rect).w,
// (*sub_rect).h,
// AVPixelFormat::AV_PIX_FMT_PAL8,
// (*sub_rect).w,
// (*sub_rect).h,
// AVPixelFormat::AV_PIX_FMT_BGRA,
// 0,
// null_mut(),
// null_mut(),
// null_mut(),
// );
// if self.vstate.sub_convert_ctx.is_null() {
// av_log(
// null_mut(),
// AV_LOG_FATAL,
// "Cannot initialize the conversion context\n\0".as_ptr()
// );
// return;
// }
// }
// println!("sws_scale!");
// // if !SDL_LockTexture(vstate->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch) {
// // sws_scale(vstate->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
// // 0, sub_rect->h, pixels, pitch);
// // SDL_UnlockTexture(vstate->sub_texture);
// // }
// }
// (*sp).uploaded = 1;
// }
// } else {
// sp = &None;
// }
// }
// }
calculate_display_rect(&mut rect, self.video_state.xleft as i64, self.video_state.ytop as i64, self.video_state.width as i64, self.video_state.height as i64, vp.width as i64, vp.height as i64, vp.sar);
if !vp.uploaded {
if self.upload_texture(&vp.frame, &self.video_state.img_convert_ctx).is_err() {
return;
}
vp.uploaded = true;
vp.flip_v = vp.frame.linesize[0] < 0;
}
// if (sp) {
// int i;
// double xratio = (double) rect.size.x / (double) sp->width;
// double yratio = (double) rect.size.y / (double) sp->height;
// for (i = 0; i < sp->sub.num_rects; i++) {
// }
// }
}
fn upload_texture(&mut self, frame: &AVFrame, img_convert_ctx: &SwsContext ) -> Result<(), RsmpegError> {
let godot_pix_fmt = Self::convert_px_format_godot(frame.format);
let mut init_texture: bool = false;
let use_conversion: bool = godot_pix_fmt == Format::FORMAT_MAX;
//if !self.vid_texture.as_ref().is_valid() { // FIXME ???
// self.vid_texture.as_ref().instance();
//}
if self.vid_texture.get_width() != (*frame).width.into() || self.vid_texture.get_height() != (*frame).height.into() {
init_texture = true;
}
if init_texture {
// self.vid_texture.create((*frame).width, (*frame).height, Image::FORMAT_RGBA8, Texture::FLAG_FILTER | Texture::FLAG_VIDEO_SURFACE); // FIXME
println!("ffplay::upload_texture, texture allocated: {}x{}", (*frame).width, (*frame).height);
let image_buffer_size: c_int = unsafe { av_image_get_buffer_size(AVPixelFormat_AV_PIX_FMT_RGBA, frame.width, frame.height, 1) };
self.image_buffer.resize(image_buffer_size as usize, 0);
if self.transfer_frame.is_some() {
self.transfer_frame = None
//av_freep(&mut self.transfer_frame.data[0]);
//av_frame_free(&mut transfer_frame);
}
if use_conversion {
let mut tf = Box::new(AVFrame::new());
// rsmpeg exposes setters rather than public fields on AVFrame, and
// `self.transfer_frame.unwrap()` would move out of the Option.
tf.set_format(AVPixelFormat_AV_PIX_FMT_RGBA);
tf.set_width(frame.width);
tf.set_height(frame.height);
let image = AVImage::new(AVPixelFormat_AV_PIX_FMT_RGBA, frame.width, frame.height, 32);
self.transfer_frame = Some(tf);
// ret = av_image_alloc(
// self.transfer_frame.unwrap().data,
// self.transfer_frame.unwrap().linesize,
// self.transfer_frame.unwrap().width,
// self.transfer_frame.unwrap().height,
// self.transfer_frame.unwrap().format,
// 32,
// );
if image.is_none(){
eprintln!("Could not allocate raw picture buffer");
}
// println!(
// "converting pixel format from {} to {}",
// av_get_pix_fmt_name(AVPixelFormat((*frame).format)),
// av_get_pix_fmt_name(AVPixelFormat_AV_PIX_FMT_RGBA)
// );
}
// else {
// println!(
// "using direct pixel format: {}",
// av_get_pix_fmt_name(AVPixelFormat((*frame).format))
// );
// }
}
if !use_conversion {
let mut img = Image::new();
img.set_data((*frame).width.into(), (*frame).height.into(), false, Format::FORMAT_RGBA8, self.image_buffer.as_slice().into());
*self.vid_texture= *ImageTexture::create_from_image(img).unwrap();
}
// else {
// // This should only happen if we are not using avfilter...
// *img_convert_ctx = sws_getCachedContext(
// *img_convert_ctx,
// (*frame).width,
// (*frame).height,
// AVPixelFormat((*frame).format),
// (*frame).width,
// (*frame).height,
// AVPixelFormat::AV_PIX_FMT_RGBA,
// self.sws_flags,
// null_mut(),
// null_mut(),
// null_mut(),
// );
//
// if !(*img_convert_ctx).is_null() {
// sws_scale(
// *img_convert_ctx,
// (*frame).data,
// (*frame).linesize,
// 0,
// (*frame).height,
// self.transfer_frame.data,
// self.transfer_frame.linesize,
// );
//
// let pixels: *mut u8 = self.image_buffer.write().ptr();
// let fdata: *mut *mut u8 = self.transfer_frame.data;
// let lines: c_int = self.transfer_frame.linesize[0];
// for y in 0..self.transfer_frame.height {
// let src_slice: &[u8] = unsafe {
// slice::from_raw_parts((*(fdata.offset(0))).offset((y * lines) as isize), self.transfer_frame.width as usize * 4)
// };
// let dest_slice: &mut [u8] = unsafe {
// slice::from_raw_parts_mut(pixels.offset((y * self.transfer_frame.width * 4) as isize), self.transfer_frame.width as usize * 4)
// };
// dest_slice.copy_from_slice(src_slice);
// }
// let img = Image::new((*frame).width, (*frame).height, 0, Image::FORMAT_RGBA8, self.image_buffer);
// self.vid_texture.set_data(img);
// } else {
// av_log(
// ptr::null_mut(),
// AV_LOG_FATAL,
// "Cannot initialize the conversion context\n\0".as_ptr() as *const i8,
// );
// ret = -1;
// }
// }
Ok(())
}
fn convert_px_format_godot(frmt: AVPixelFormat ) -> Format {
match frmt {
    // Godot's FORMAT_RGB8 is 24-bit RGB, so AV_PIX_FMT_RGB24 maps to it directly;
    // the RGB8 and RGB0 arms are lossy placeholders (their byte layouts differ).
    AVPixelFormat_AV_PIX_FMT_RGB8 => Format::FORMAT_RGB8,
    AVPixelFormat_AV_PIX_FMT_RGBA => Format::FORMAT_RGBA8,
    AVPixelFormat_AV_PIX_FMT_RGB24 => Format::FORMAT_RGB8,
    AVPixelFormat_AV_PIX_FMT_RGB0 => Format::FORMAT_RGB8,
    _ => Format::FORMAT_MAX,
}
}
}

21
src/playback/mutex.rs Normal file

@@ -0,0 +1,21 @@
use std::sync::{Arc, Condvar, Mutex};
pub trait New {
fn new() -> Self;
}
pub struct MutexQueue<T : New> {
pub mutex : Arc<Mutex<()>>,
pub cond: Arc<Condvar>,
pub data: T
}
impl <T : New> MutexQueue<T> {
pub fn new() -> MutexQueue<T> {
MutexQueue {
mutex : Arc::new(Mutex::new(())),
cond: Arc::new(Condvar::new()),
data: T::new()
}
}
}
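
The unit Mutex here is only a signaling lock: `data` is not inside the Mutex, so the compiler cannot enforce that callers hold the lock while touching it (wrapping `data` in `Mutex<T>` would). Under that caveat, the intended pattern loops on a predicate so spurious wakeups are harmless; a minimal sketch against the PacketQueue instantiation from src/playback/packet.rs:

    use crate::playback::packet::MutexPacketQueue;

    // Block until the queue has a packet or is being torn down.
    fn wait_until_nonempty(q: &MutexPacketQueue) {
        let mut guard = q.mutex.lock().unwrap();
        while q.data.queue.is_empty() && !q.data.abort_request {
            guard = q.cond.wait(guard).unwrap();
        }
    }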

194
src/playback/packet.rs Normal file

@@ -0,0 +1,194 @@
use std::collections::VecDeque;
use libc::EAGAIN;
use rsmpeg::error::RsmpegError;
use rsmpeg::error::RsmpegError::AVError;
use std::sync::Condvar;
use rsmpeg::avcodec::AVPacket;
use crate::playback::mutex::{MutexQueue, New};
pub struct MyAVPacketList {
pub pkt : Box<AVPacket>,
pub next: Option<Box<MyAVPacketList>>,
pub serial: i64,
}
impl Default for MyAVPacketList {
fn default() -> Self {
MyAVPacketList {
pkt: Default::default(),
next: Default::default(),
serial: Default::default(),
}
}
}
impl MyAVPacketList {
fn new(pkt: AVPacket, serial: i64) -> Self {
MyAVPacketList {
pkt: Box::new(pkt),
next: None,
serial,
}
}
}
pub type MutexPacketQueue = MutexQueue<PacketQueue>;
pub struct PacketQueue {
pub queue : VecDeque<MyAVPacketList>,
pub size: i64,
pub duration: i64,
pub abort_request: bool,
pub serial: i64,
}
impl New for PacketQueue {
fn new() -> PacketQueue {
PacketQueue {
queue: VecDeque::new(),
size: 0,
duration: 0,
abort_request: false,
serial: 0,
}
}
}
impl PacketQueue {
/// Put the given AVPacket in this PacketQueue.
///
/// `cond` is the queue's condvar, notified once the packet has been queued.
///
/// Returns Ok(()) if the AVPacket was correctly inserted.
fn put(&mut self, cond: &Condvar, packet: AVPacket) -> Result<(), RsmpegError>
{
if self.abort_request
{
return Err(AVError(-1));
}
let pkt = MyAVPacketList::new(packet, self.serial);
let pkt_duration = pkt.pkt.duration;
let pkt_size : i64 = pkt.pkt.size.into();
//if (packet == &flush_pkt) // TODO
// queue->serial++;
self.queue.push_back(pkt);
// queue->size += pkt1->pkt.size + sizeof(*pkt1);
// queue->duration += pkt1->pkt.duration;
self.size += pkt_size; // TODO
self.duration += pkt_duration;
/* XXX: should duplicate packet data in DV case */
cond.notify_one();
Ok(())
}
}
impl MutexPacketQueue {
pub fn put_nullpacket(&mut self, stream_index: i64) -> Result<(), RsmpegError>
{
let mut pkt = AVPacket::new();
pkt.set_stream_index(stream_index as i32);
self.put(pkt)
}
/// Puts the given AVPacket in this PacketQueue, taking the queue lock first.
///
/// Returns Ok(()) if the AVPacket was correctly inserted.
fn put(&mut self, packet: AVPacket) -> Result<(), RsmpegError>
{
//SDL_LockMutex(queue->mutex);
let _pq = self.mutex.lock().unwrap();
let ret = self.data.put(&self.cond, packet);
//SDL_UnlockMutex(queue->mutex);
// if (packet != &flush_pkt && ret < 0) // TODO
// av_packet_unref(packet);
ret
}
/// Flush the given packet queue
fn flush(&mut self)
{
//SDL_LockMutex(q->mutex);
let _m = self.mutex.lock().unwrap();
let pq = &mut self.data;
pq.queue.clear();
pq.size = 0;
pq.duration = 0;
pq.serial += 1;
//SDL_UnlockMutex(q->mutex);
}
fn destroy(&mut self)
{
self.flush();
}
fn abort(&mut self) {
    //SDL_LockMutex(q->mutex);
    let _m = self.mutex.lock().unwrap();
    self.data.abort_request = true;
    //SDL_CondSignal(q->cond);
    self.cond.notify_one()
    //SDL_UnlockMutex(q->mutex);
}
fn start(&mut self) {
    //SDL_LockMutex(q->mutex);
    let _m = self.mutex.lock().unwrap();
    let pq = &mut self.data;
    pq.abort_request = false;
    pq.serial += 1;
    //SDL_UnlockMutex(q->mutex);
}
pub fn get(&mut self, packet: &mut AVPacket, block: bool, serial: Option<&mut Box<i64>>) -> Result<(), RsmpegError>
{
// SDL_LockMutex(q->mutex);
let mut m = self.mutex.lock().unwrap();
let pq = &mut self.data;
let ret;
loop {
    if pq.abort_request {
        ret = Err(AVError(-1));
        break;
    }
    if let Some(pkt) = pq.queue.pop_front() {
        // Shrink the byte accounting, mirroring ffplay's q->size -= pkt.size.
        pq.size -= i64::from(pkt.pkt.as_ref().size);
        *packet = *pkt.pkt;
        if let Some(s) = serial {
            *s = Box::new(pkt.serial)
        }
        ret = Ok(());
        break;
    } else if !block {
        // Non-blocking call on an empty queue: report "try again".
        ret = Err(AVError(EAGAIN));
        break;
    } else {
        m = self.cond.wait(m).unwrap();
    }
}
ret
}
}
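
Nothing in this commit fills the queue yet; the eventual read thread will demux packets into it and signal EOF with put_nullpacket, as ffplay's read_thread does. A sketch of that producer side, assuming it lives in this module (so it can reach the private put()) and rsmpeg's AVFormatContextInput::read_packet:

    use rsmpeg::avformat::AVFormatContextInput;

    // Demux one stream's packets into the queue until EOF.
    fn feed(input: &mut AVFormatContextInput, q: &mut MutexPacketQueue, stream_index: i32) -> anyhow::Result<()> {
        while let Some(pkt) = input.read_packet()? {
            if pkt.stream_index == stream_index {
                q.put(pkt).ok(); // queue refuses packets while aborting; ignore that here
            }
        }
        q.put_nullpacket(stream_index as i64)?;
        Ok(())
    }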

5
src/stream.rs Normal file

@@ -0,0 +1,5 @@
pub struct Stream {
pub cur_time: u64
}

BIN
tests/assets/video1.mp4 Normal file

Binary file not shown.