use integer frame numbers instead of floating-point time values in ffmpeg movies, for robustness

This commit is contained in:
David Rose
2011-11-15 22:34:43 +00:00
parent 3f2b3ca4b5
commit b4e931322f
13 changed files with 444 additions and 369 deletions
+11 -20
View File
@@ -351,42 +351,33 @@ cull_callback(CullTraverser *, const CullTraverserData &) const {
CDReader cdata(_cycler);
double offset;
int true_loop_count = 1;
if (cdata->_synchronize != 0) {
offset = cdata->_synchronize->get_time();
} else {
// Calculate the cursor position modulo the length of the movie.
double now = ClockObject::get_global_clock()->get_frame_time();
double clock = cdata->_clock;
offset = cdata->_clock;
if (cdata->_playing) {
clock += now * cdata->_play_rate;
}
int true_loop_count = cdata->_loops_total;
if (true_loop_count <= 0) {
true_loop_count = 1000000000;
}
if (clock >= cdata->_video_length * true_loop_count) {
offset = cdata->_video_length;
} else {
offset = fmod(clock, cdata->_video_length);
offset += now * cdata->_play_rate;
}
true_loop_count = cdata->_loops_total;
}
for (int i=0; i<((int)(cdata->_pages.size())); i++) {
MovieVideoCursor *color = cdata->_pages[i]._color;
MovieVideoCursor *alpha = cdata->_pages[i]._alpha;
if (color && alpha) {
if ((offset >= color->next_start())||
((offset < color->last_start()) && (color->can_seek()))) {
color->fetch_into_texture_rgb(offset, (MovieTexture*)this, i);
if (color->set_time(offset, true_loop_count)) {
color->fetch_into_texture_rgb((MovieTexture*)this, i);
}
if ((offset >= alpha->next_start())||
((offset < alpha->last_start()) && (alpha->can_seek()))) {
alpha->fetch_into_texture_alpha(offset, (MovieTexture*)this, i, cdata_tex->_alpha_file_channel);
if (alpha->set_time(offset, true_loop_count)) {
alpha->fetch_into_texture_alpha((MovieTexture*)this, i, cdata_tex->_alpha_file_channel);
}
} else if (color) {
if ((offset >= color->next_start())||
((offset < color->last_start()) && (color->can_seek()))) {
color->fetch_into_texture(offset, (MovieTexture*)this, i);
bool result = color->set_time(offset, true_loop_count);
if (result) {
color->fetch_into_texture((MovieTexture*)this, i);
}
}
}
+13
View File
@@ -12,3 +12,16 @@
//
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::FfmpegBuffer::Constructor
// Access: Public
// Description:
////////////////////////////////////////////////////////////////////
INLINE FfmpegVideoCursor::FfmpegBuffer::
FfmpegBuffer(size_t block_size) :
Buffer(block_size),
_begin_frame(-1),
_end_frame(0)
{
}
+273 -162
View File
@@ -49,7 +49,7 @@ FfmpegVideoCursor() :
_lock("FfmpegVideoCursor::_lock"),
_action_cvar(_lock),
_thread_status(TS_stopped),
_seek_time(0.0),
_seek_frame(0),
_packet0(NULL),
_packet1(NULL),
_format_ctx(NULL),
@@ -58,7 +58,7 @@ FfmpegVideoCursor() :
_video_index(-1),
_frame(NULL),
_frame_out(NULL),
_min_fseek(3.0)
_eof_known(false)
{
}
@@ -103,10 +103,14 @@ init_from(FfmpegVideo *source) {
memset(_packet0, 0, sizeof(AVPacket));
memset(_packet1, 0, sizeof(AVPacket));
fetch_packet(0.0);
fetch_packet(0);
_initial_dts = _packet0->dts;
fetch_frame(-1);
_current_frame = -1;
_eof_known = false;
_eof_frame = 0;
#ifdef HAVE_THREADS
set_max_readahead_frames(ffmpeg_max_readahead_frames);
#endif // HAVE_THREADS
@@ -124,7 +128,7 @@ FfmpegVideoCursor(FfmpegVideo *src) :
_lock("FfmpegVideoCursor::_lock"),
_action_cvar(_lock),
_thread_status(TS_stopped),
_seek_time(0.0),
_seek_frame(0),
_packet0(NULL),
_packet1(NULL),
_format_ctx(NULL),
@@ -133,7 +137,7 @@ FfmpegVideoCursor(FfmpegVideo *src) :
_video_index(-1),
_frame(NULL),
_frame_out(NULL),
_min_fseek(3.0)
_eof_known(false)
{
init_from(src);
}
@@ -299,14 +303,7 @@ stop_thread() {
// else starts the thread up again.
MutexHolder holder(_lock);
Buffers::iterator bi;
for (bi = _readahead_frames.begin(); bi != _readahead_frames.end(); ++bi) {
internal_free_buffer(*bi);
}
_readahead_frames.clear();
for (bi = _recycled_frames.begin(); bi != _recycled_frames.end(); ++bi) {
internal_free_buffer(*bi);
}
_recycled_frames.clear();
}
@@ -322,13 +319,52 @@ is_thread_started() const {
return (_thread_status != TS_stopped);
}
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::set_time
// Access: Published, Virtual
// Description: See MovieVideoCursor::set_time().
////////////////////////////////////////////////////////////////////
bool FfmpegVideoCursor::
set_time(double time, int loop_count) {
int frame = (int)(time / _video_timebase + 0.5);
if (_eof_known) {
if (loop_count == 0) {
frame = frame % _eof_frame;
} else {
int last_frame = _eof_frame * loop_count;
if (frame < last_frame) {
frame = frame % _eof_frame;
} else {
frame = _eof_frame - 1;
}
}
}
if (ffmpeg_cat.is_spam() /* && frame != _current_frame*/) {
ffmpeg_cat.spam()
<< "set_time(" << time << "): " << frame << ", loop_count = " << loop_count << "\n";
}
_current_frame = frame;
if (_current_frame_buffer != NULL) {
// If we've previously returned a frame, don't bother asking for a
// next one if that frame is still valid.
return (_current_frame >= _current_frame_buffer->_end_frame ||
_current_frame < _current_frame_buffer->_begin_frame);
}
// If our last request didn't return a frame, try again.
return true;
}
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::fetch_buffer
// Access: Public, Virtual
// Description: See MovieVideoCursor::fetch_buffer.
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *FfmpegVideoCursor::
fetch_buffer(double time) {
PT(MovieVideoCursor::Buffer) FfmpegVideoCursor::
fetch_buffer() {
MutexHolder holder(_lock);
// If there was an error at any point, just return NULL.
@@ -336,10 +372,10 @@ fetch_buffer(double time) {
return NULL;
}
Buffer *frame = NULL;
PT(FfmpegBuffer) frame;
if (_thread_status == TS_stopped) {
// Non-threaded case. Just get the next frame directly.
fetch_time(time);
advance_to_frame(_current_frame);
if (_frame_ready) {
frame = do_alloc_frame();
export_frame(frame);
@@ -352,46 +388,70 @@ fetch_buffer(double time) {
frame = _readahead_frames.front();
_readahead_frames.pop_front();
_action_cvar.notify();
while (frame->_end_time < time && !_readahead_frames.empty()) {
while (frame->_end_frame < _current_frame && !_readahead_frames.empty()) {
// This frame is too old. Discard it.
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "ffmpeg for " << _filename.get_basename()
<< " at time " << time << ", discarding frame at "
<< frame->_begin_time << "\n";
<< " at frame " << _current_frame << ", discarding frame at "
<< frame->_begin_frame << "\n";
}
do_recycle_frame(frame);
frame = _readahead_frames.front();
_readahead_frames.pop_front();
}
if (frame->_begin_time > time) {
if (frame->_begin_frame > _current_frame) {
// This frame is too new. Empty all remaining frames and seek
// backwards.
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "ffmpeg for " << _filename.get_basename()
<< " at frame " << _current_frame << ", encountered too-new frame at "
<< frame->_begin_frame << "\n";
}
do_recycle_all_frames();
if (_thread_status == TS_wait || _thread_status == TS_seek || _thread_status == TS_readahead) {
_thread_status = TS_seek;
_seek_time = time;
_seek_frame = _current_frame;
_action_cvar.notify();
}
}
}
if (frame == NULL || frame->_end_time < time) {
if (frame == NULL || frame->_end_frame < _current_frame) {
// No frame available, or the frame is too old. Seek.
if (_thread_status == TS_wait || _thread_status == TS_seek || _thread_status == TS_readahead) {
_thread_status = TS_seek;
_seek_time = time;
_seek_frame = _current_frame;
_action_cvar.notify();
}
}
}
if (frame != NULL && (frame->_end_time < time || frame->_begin_time > time)) {
if (frame != NULL && (frame->_end_frame < _current_frame || frame->_begin_frame > _current_frame)) {
// The frame is too old or too new. Just recycle it.
do_recycle_frame(frame);
frame = NULL;
}
return frame;
if (frame != NULL) {
if (_current_frame_buffer != NULL) {
do_recycle_frame(_current_frame_buffer);
}
_current_frame_buffer = frame;
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "ffmpeg for " << _filename.get_basename()
<< " at frame " << _current_frame << ", returning frame at "
<< frame->_begin_frame << "\n";
}
} else {
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "ffmpeg for " << _filename.get_basename()
<< " at frame " << _current_frame << ", returning NULL\n";
}
}
return frame.p();
}
////////////////////////////////////////////////////////////////////
@@ -403,8 +463,18 @@ fetch_buffer(double time) {
////////////////////////////////////////////////////////////////////
void FfmpegVideoCursor::
release_buffer(Buffer *buffer) {
MutexHolder holder(_lock);
do_recycle_frame(buffer);
}
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::make_new_buffer
// Access: Protected, Virtual
// Description: May be called by a derived class to allocate a new
// Buffer object.
////////////////////////////////////////////////////////////////////
PT(MovieVideoCursor::Buffer) FfmpegVideoCursor::
make_new_buffer() {
PT(FfmpegBuffer) frame = new FfmpegBuffer(size_x() * size_y() * get_num_components());
return frame.p();
}
////////////////////////////////////////////////////////////////////
@@ -424,7 +494,7 @@ open_stream() {
if (!_source->get_subfile_info().is_empty()) {
// Read a subfile.
if (!_ffvfile.open_subfile(_source->get_subfile_info())) {
movies_cat.info()
ffmpeg_cat.info()
<< "Couldn't open " << _source->get_subfile_info() << "\n";
close_stream();
return false;
@@ -433,7 +503,7 @@ open_stream() {
} else {
// Read a filename.
if (!_ffvfile.open_vfs(_filename)) {
movies_cat.info()
ffmpeg_cat.info()
<< "Couldn't open " << _filename << "\n";
close_stream();
return false;
@@ -445,7 +515,7 @@ open_stream() {
nassertr(_format_ctx != NULL, false);
if (av_find_stream_info(_format_ctx) < 0) {
movies_cat.info()
ffmpeg_cat.info()
<< "Couldn't find stream info\n";
close_stream();
return false;
@@ -458,11 +528,12 @@ open_stream() {
_video_index = i;
_video_ctx = _format_ctx->streams[i]->codec;
_video_timebase = av_q2d(_format_ctx->streams[i]->time_base);
_min_fseek = (int)(3.0 / _video_timebase);
}
}
if (_video_ctx == NULL) {
movies_cat.info()
ffmpeg_cat.info()
<< "Couldn't find video_ctx\n";
close_stream();
return false;
@@ -470,13 +541,13 @@ open_stream() {
AVCodec *pVideoCodec = avcodec_find_decoder(_video_ctx->codec_id);
if (pVideoCodec == NULL) {
movies_cat.info()
ffmpeg_cat.info()
<< "Couldn't find codec\n";
close_stream();
return false;
}
if (avcodec_open(_video_ctx, pVideoCodec) < 0) {
movies_cat.info()
ffmpeg_cat.info()
<< "Couldn't open codec\n";
close_stream();
return false;
@@ -485,10 +556,9 @@ open_stream() {
_size_x = _video_ctx->width;
_size_y = _video_ctx->height;
_num_components = 3; // Don't know how to implement RGBA movies yet.
_length = (_format_ctx->duration * 1.0) / AV_TIME_BASE;
_length = (double)_format_ctx->duration / (double)AV_TIME_BASE;
_can_seek = true;
_can_seek_fast = true;
_eof_reached = false;
return true;
}
@@ -581,8 +651,8 @@ st_thread_main(void *self) {
void FfmpegVideoCursor::
thread_main() {
MutexHolder holder(_lock);
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "ffmpeg thread for " << _filename.get_basename() << " starting.\n";
}
@@ -600,8 +670,8 @@ thread_main() {
}
_thread_status = TS_stopped;
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "ffmpeg thread for " << _filename.get_basename() << " stopped.\n";
}
}
@@ -630,7 +700,7 @@ do_poll() {
case TS_readahead:
if ((int)_readahead_frames.size() < _max_readahead_frames) {
// Time to read the next frame.
Buffer *frame = do_alloc_frame();
PT(FfmpegBuffer) frame = do_alloc_frame();
nassertr(frame != NULL, false);
_lock.release();
fetch_frame(-1);
@@ -650,14 +720,14 @@ do_poll() {
return false;
case TS_seek:
// Seek to a specific time.
// Seek to a specific frame.
{
double seek_time = _seek_time;
int seek_frame = _seek_frame;
_thread_status = TS_seeking;
Buffer *frame = do_alloc_frame();
PT(FfmpegBuffer) frame = do_alloc_frame();
nassertr(frame != NULL, false);
_lock.release();
fetch_time(seek_time);
advance_to_frame(seek_frame);
if (_frame_ready) {
export_frame(frame);
_lock.acquire();
@@ -691,14 +761,15 @@ do_poll() {
// previously-recycled object. Assumes the lock is
// held.
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *FfmpegVideoCursor::
PT(FfmpegVideoCursor::FfmpegBuffer) FfmpegVideoCursor::
do_alloc_frame() {
if (!_recycled_frames.empty()) {
Buffer *frame = _recycled_frames.front();
PT(FfmpegBuffer) frame = _recycled_frames.front();
_recycled_frames.pop_front();
return frame;
}
return internal_alloc_buffer();
PT(Buffer) buffer = make_new_buffer();
return (FfmpegBuffer *)buffer.p();
}
////////////////////////////////////////////////////////////////////
@@ -708,7 +779,7 @@ do_alloc_frame() {
// future reuse. Assumes the lock is held.
////////////////////////////////////////////////////////////////////
void FfmpegVideoCursor::
do_recycle_frame(Buffer *frame) {
do_recycle_frame(FfmpegBuffer *frame) {
_recycled_frames.push_back(frame);
}
@@ -721,12 +792,12 @@ do_recycle_frame(Buffer *frame) {
void FfmpegVideoCursor::
do_recycle_all_frames() {
while (!_readahead_frames.empty()) {
Buffer *frame = _readahead_frames.front();
PT(FfmpegBuffer) frame = _readahead_frames.front();
_readahead_frames.pop_front();
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "ffmpeg for " << _filename.get_basename()
<< " recycling frame at " << frame->_begin_time << "\n";
<< " recycling frame at " << frame->_begin_frame << "\n";
}
_recycled_frames.push_back(frame);
}
@@ -736,32 +807,42 @@ do_recycle_all_frames() {
// Function: FfmpegVideoCursor::fetch_packet
// Access: Private
// Description: Called within the sub-thread. Fetches a video packet
// and stores it in the packet0 buffer. Sets packet_time
// and stores it in the packet0 buffer. Sets packet_frame
// to the packet's timestamp. If a packet could not be
// read, the packet is cleared and the packet_time is
// read, the packet is cleared and the packet_frame is
// set to the specified default value. Returns true on
// failure (such as the end of the video), or false on
// success.
////////////////////////////////////////////////////////////////////
bool FfmpegVideoCursor::
fetch_packet(double default_time) {
fetch_packet(int default_frame) {
if (_packet0->data) {
av_free_packet(_packet0);
}
while (av_read_frame(_format_ctx, _packet0) >= 0) {
if (_packet0->stream_index == _video_index) {
_packet_time = _packet0->dts * _video_timebase;
_packet_frame = _packet0->dts;
return false;
}
av_free_packet(_packet0);
}
_packet0->data = 0;
_packet_time = default_time;
_eof_reached = false;
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "end of video\n";
if (!_eof_known && default_frame != 0) {
_eof_frame = _packet_frame;
_eof_known = true;
}
if (ffmpeg_cat.is_spam()) {
if (_eof_known) {
ffmpeg_cat.spam()
<< "end of video at frame " << _eof_frame << "\n";
} else {
ffmpeg_cat.spam()
<< "end of video\n";
}
}
_packet_frame = default_frame;
return true;
}
@@ -781,33 +862,35 @@ flip_packets() {
// Function: FfmpegVideoCursor::fetch_frame
// Access: Private
// Description: Called within the sub-thread. Slides forward until
// the indicated time, then fetches a frame from the
// the indicated frame, then fetches a frame from the
// stream and stores it in the frame buffer. Sets
// _begin_time and _end_time to indicate the extents of
// the frame. Returns true if the end of the video is
// reached.
// _begin_frame and _end_frame to indicate the extents of
// the frame. Sets _frame_ready true to indicate a
// frame is now available, or false if it is not (for
// instance, because the end of the video was reached).
////////////////////////////////////////////////////////////////////
bool FfmpegVideoCursor::
fetch_frame(double time) {
void FfmpegVideoCursor::
fetch_frame(int frame) {
static PStatCollector fetch_buffer_pcollector("*:FFMPEG Video Decoding:Fetch");
PStatTimer timer(fetch_buffer_pcollector);
int finished = 0;
if (_packet_time <= time) {
if (_packet_frame <= frame) {
_video_ctx->skip_frame = AVDISCARD_BIDIR;
// Put the current packet aside in case we discover it's the
// packet to keep.
flip_packets();
// Get the next packet. The first packet beyond the time we're
// Get the next packet. The first packet beyond the frame we're
// looking for marks the point to stop.
_begin_time = _packet_time;
if (fetch_packet(time)) {
_begin_frame = _packet_frame;
if (fetch_packet(frame)) {
_end_frame = _packet_frame;
_frame_ready = false;
return true;
return;
}
while (_packet_time <= time) {
while (_packet_frame <= frame) {
static PStatCollector seek_pcollector("*:FFMPEG Video Decoding:Seek");
PStatTimer timer(seek_pcollector);
@@ -819,10 +902,11 @@ fetch_frame(double time) {
avcodec_decode_video2(_video_ctx, _frame, &finished, _packet1);
#endif
flip_packets();
_begin_time = _packet_time;
if (fetch_packet(time)) {
_begin_frame = _packet_frame;
if (fetch_packet(frame)) {
_end_frame = _packet_frame;
_frame_ready = false;
return true;
return;
}
}
_video_ctx->skip_frame = AVDISCARD_DEFAULT;
@@ -839,7 +923,6 @@ fetch_frame(double time) {
} else {
// Just get the next frame.
_begin_time = _packet_time;
finished = 0;
while (!finished && _packet0->data) {
#if LIBAVCODEC_VERSION_INT < 3414272
@@ -848,28 +931,28 @@ fetch_frame(double time) {
#else
avcodec_decode_video2(_video_ctx, _frame, &finished, _packet0);
#endif
fetch_packet(_begin_time + 1.0);
_begin_frame = _packet_frame;
fetch_packet(_begin_frame + 1);
}
}
_end_time = _packet_time;
_end_frame = _packet_frame;
_frame_ready = true;
return false;
}
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::seek
// Access: Private
// Description: Called within the sub-thread. Seeks to a target
// location. Afterward, the packet_time is guaranteed
// to be less than or equal to the specified time.
// location. Afterward, the packet_frame is guaranteed
// to be less than or equal to the specified frame.
////////////////////////////////////////////////////////////////////
void FfmpegVideoCursor::
seek(double t, bool backward) {
seek(int frame, bool backward) {
static PStatCollector seek_pcollector("*:FFMPEG Video Decoding:Seek");
PStatTimer timer(seek_pcollector);
PN_int64 target_ts = (PN_int64)(t / _video_timebase);
PN_int64 target_ts = (PN_int64)frame;
if (target_ts < (PN_int64)(_initial_dts)) {
// Attempts to seek before the first packet will fail.
target_ts = _initial_dts;
@@ -880,15 +963,26 @@ seek(double t, bool backward) {
//reset_stream();
}
if (av_seek_frame(_format_ctx, _video_index, target_ts, flags) < 0) {
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Seek failure.\n";
}
reset_stream();
av_seek_frame(_format_ctx, _video_index, target_ts, 0);
if (backward) {
// Now try to seek forward.
reset_stream();
return seek(frame, false);
}
// Try a binary search to get a little closer.
if (binary_seek(_initial_dts, frame, frame, 1) < 0) {
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Seek double failure.\n";
}
reset_stream();
}
}
{
@@ -918,6 +1012,35 @@ seek(double t, bool backward) {
fetch_frame(-1);
}
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::binary_seek
// Access: Private
// Description: Casts about within the stream for a reasonably-close
// frame to seek to. We're trying to get as close as
// possible to target_frame.
////////////////////////////////////////////////////////////////////
int FfmpegVideoCursor::
binary_seek(int min_frame, int max_frame, int target_frame, int num_iterations) {
int try_frame = (min_frame + max_frame) / 2;
if (num_iterations > 5 || try_frame >= max_frame) {
// Success.
return 0;
}
if (av_seek_frame(_format_ctx, _video_index, try_frame, AVSEEK_FLAG_BACKWARD) < 0) {
// Failure. Try lower.
if (binary_seek(min_frame, try_frame - 1, target_frame, num_iterations + 1) < 0) {
return -1;
}
} else {
// Success. Try higher.
if (binary_seek(try_frame + 1, max_frame, target_frame, num_iterations + 1) < 0) {
return -1;
}
}
return 0;
}
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::reset_stream
// Access: Private
@@ -926,8 +1049,8 @@ seek(double t, bool backward) {
////////////////////////////////////////////////////////////////////
void FfmpegVideoCursor::
reset_stream() {
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Resetting ffmpeg stream.\n";
}
@@ -939,77 +1062,65 @@ reset_stream() {
return;
}
fetch_packet(0.0);
fetch_packet(0);
_initial_dts = _packet0->dts;
fetch_frame(-1);
}
////////////////////////////////////////////////////////////////////
// Function: FfmpegVideoCursor::fetch_time
// Function: FfmpegVideoCursor::advance_to_frame
// Access: Private
// Description: Called within the sub-thread. Advance until the
// specified time is in the export buffer.
// specified frame is in the export buffer.
////////////////////////////////////////////////////////////////////
void FfmpegVideoCursor::
fetch_time(double time) {
advance_to_frame(int frame) {
static PStatCollector fetch_buffer_pcollector("*:FFMPEG Video Decoding:Fetch");
PStatTimer timer(fetch_buffer_pcollector);
if (time < _begin_time) {
// Time is in the past.
if (_eof_reached) {
// Go ahead and reset the video when we back up after having
// reached the end. This avoids potential problems with
// unseekable streams.
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Resetting to " << time << " after eof\n";
if (frame < _begin_frame) {
// Frame is in the past.
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Seeking backward to " << frame << " from " << _begin_frame << "\n";
}
seek(frame, true);
if (_begin_frame > frame) {
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Ended up at " << _begin_frame << ", not far enough back!\n";
}
reset_stream();
} else {
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Seeking backward to " << time << " from " << _begin_time << "\n";
}
seek(time, true);
if (_begin_time > time) {
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Ended up at " << _begin_time << ", not far enough back!\n";
}
reset_stream();
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Reseek to 0, got " << _begin_time << "\n";
}
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Reseek to 0, got " << _begin_frame << "\n";
}
}
if (time < _begin_time) {
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Now sliding forward to " << time << " from " << _begin_time << "\n";
if (frame > _end_frame) {
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Now sliding forward to " << frame << " from " << _begin_frame << "\n";
}
fetch_frame(time);
fetch_frame(frame);
}
} else if (time < _end_time) {
// Time is in the present: already have the frame.
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Currently have " << time << "\n";
} else if (frame < _end_frame) {
// Frame is in the present: already have the frame.
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Currently have " << frame << " within " << _begin_frame << " .. " << _end_frame << "\n";
}
} else if (time < _end_time + _min_fseek) {
// Time is in the near future.
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Sliding forward to " << time << " from " << _begin_time << "\n";
} else if (frame < _end_frame + _min_fseek) {
// Frame is in the near future.
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Sliding forward to " << frame << " from " << _begin_frame << "\n";
}
fetch_frame(time);
fetch_frame(frame);
} else {
// Time is in the far future. Seek forward, then read.
// Frame is in the far future. Seek forward, then read.
// There's a danger here: because keyframes are spaced
// unpredictably, trying to seek forward could actually
// move us backward in the stream! This must be avoided.
@@ -1017,31 +1128,31 @@ fetch_time(double time) {
// us backward, we increase the minimum threshold distance
// for forward-seeking in the future.
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Jumping forward to " << time << " from " << _begin_time << "\n";
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Jumping forward to " << frame << " from " << _begin_frame << "\n";
}
double base = _begin_time;
seek(time, false);
if (_begin_time < base) {
_min_fseek += (base - _begin_time);
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
int base = _begin_frame;
seek(frame, false);
if (_begin_frame < base) {
_min_fseek += (base - _begin_frame);
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Wrong way! Increasing _min_fseek to " << _min_fseek << "\n";
}
}
if (time < _begin_time) {
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Correcting, sliding forward to " << time << " from " << _begin_time << "\n";
if (frame > _end_frame) {
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Correcting, sliding forward to " << frame << " from " << _begin_frame << "\n";
}
fetch_frame(time);
fetch_frame(frame);
}
}
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
<< "Wanted " << time << ", got " << _begin_time << "\n";
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "Wanted " << frame << ", got " << _begin_frame << "\n";
}
}
@@ -1052,14 +1163,14 @@ fetch_time(double time) {
// of the frame buffer into the indicated target buffer.
////////////////////////////////////////////////////////////////////
void FfmpegVideoCursor::
export_frame(MovieVideoCursor::Buffer *buffer) {
export_frame(FfmpegBuffer *buffer) {
static PStatCollector export_frame_collector("*:FFMPEG Convert Video to BGR");
PStatTimer timer(export_frame_collector);
if (!_frame_ready) {
// No frame data ready, just fill with black.
if (ffmpeg_cat.is_debug()) {
ffmpeg_cat.debug()
if (ffmpeg_cat.is_spam()) {
ffmpeg_cat.spam()
<< "ffmpeg for " << _filename.get_basename()
<< ", no frame available.\n";
}
@@ -1069,8 +1180,8 @@ export_frame(MovieVideoCursor::Buffer *buffer) {
_frame_out->data[0] = buffer->_block + ((_size_y - 1) * _size_x * 3);
_frame_out->linesize[0] = _size_x * -3;
buffer->_begin_time = _begin_time;
buffer->_end_time = _end_time;
buffer->_begin_frame = _begin_frame;
buffer->_end_frame = _end_frame;
#ifdef HAVE_SWSCALE
nassertv(_convert_ctx != NULL && _frame != NULL && _frame_out != NULL);
sws_scale(_convert_ctx, _frame->data, _frame->linesize, 0, _size_y, _frame_out->data, _frame_out->linesize);
+30 -14
View File
@@ -61,9 +61,20 @@ PUBLISHED:
bool is_thread_started() const;
public:
virtual Buffer *fetch_buffer(double time);
virtual bool set_time(double time, int loop_count);
virtual PT(Buffer) fetch_buffer();
virtual void release_buffer(Buffer *buffer);
protected:
class FfmpegBuffer : public Buffer {
public:
INLINE FfmpegBuffer(size_t block_size);
int _begin_frame;
int _end_frame;
};
virtual PT(Buffer) make_new_buffer();
private:
bool open_stream();
void close_stream();
@@ -85,7 +96,7 @@ private:
// Condition: the thread has something to do.
ConditionVar _action_cvar;
typedef pdeque<Buffer *> Buffers;
typedef pdeque<PT(FfmpegBuffer) > Buffers;
Buffers _readahead_frames;
Buffers _recycled_frames;
enum ThreadStatus {
@@ -97,7 +108,10 @@ private:
TS_shutdown,
};
ThreadStatus _thread_status;
double _seek_time;
int _seek_frame;
int _current_frame;
PT(FfmpegBuffer) _current_frame_buffer;
private:
// The following functions will be called in the sub-thread.
@@ -105,21 +119,22 @@ private:
void thread_main();
bool do_poll();
Buffer *do_alloc_frame();
void do_recycle_frame(Buffer *frame);
PT(FfmpegBuffer) do_alloc_frame();
void do_recycle_frame(FfmpegBuffer *frame);
void do_recycle_all_frames();
bool fetch_packet(double default_time);
bool fetch_packet(int default_frame);
void flip_packets();
bool fetch_frame(double time);
void seek(double t, bool backward);
void fetch_time(double time);
void fetch_frame(int frame);
void seek(int frame, bool backward);
int binary_seek(int min_frame, int max_frame, int target_frame, int num_iterations);
void advance_to_frame(int frame);
void reset_stream();
void export_frame(Buffer *buffer);
void export_frame(FfmpegBuffer *buffer);
// The following data members will be accessed by the sub-thread.
AVPacket *_packet0, *_packet1;
double _packet_time;
int _packet_frame;
AVFormatContext *_format_ctx;
AVCodecContext *_video_ctx;
SwsContext *_convert_ctx;
@@ -131,10 +146,11 @@ private:
AVFrame *_frame_out;
int _initial_dts;
double _min_fseek;
double _begin_time;
double _end_time;
int _begin_frame;
int _end_frame;
bool _frame_ready;
bool _eof_reached;
bool _eof_known;
int _eof_frame;
public:
static void register_with_read_factory();
+25 -16
View File
@@ -67,7 +67,8 @@ InkblotVideoCursor(InkblotVideo *src) :
memset(_cells2, 255, padx * pady);
_can_seek = true;
_can_seek_fast = false;
_frames_read = 0;
_current_frame = 0;
_last_frame = -1;
}
////////////////////////////////////////////////////////////////////
@@ -81,33 +82,43 @@ InkblotVideoCursor::
delete[] _cells2;
}
////////////////////////////////////////////////////////////////////
// Function: InkblotVideoCursor::set_time
// Access: Published, Virtual
// Description: See MovieVideoCursor::set_time().
////////////////////////////////////////////////////////////////////
bool InkblotVideoCursor::
set_time(double time, int loop_count) {
int frame = (int)(time / _fps);
if (frame == _current_frame) {
return false;
}
_current_frame = frame;
return true;
}
////////////////////////////////////////////////////////////////////
// Function: InkblotVideoCursor::fetch_buffer
// Access: Published, Virtual
// Description: See MovieVideoCursor::fetch_buffer.
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *InkblotVideoCursor::
fetch_buffer(double time) {
Buffer *buffer = get_standard_buffer();
PT(MovieVideoCursor::Buffer) InkblotVideoCursor::
fetch_buffer() {
PT(Buffer) buffer = get_standard_buffer();
int padx = size_x() + 2;
int pady = size_y() + 2;
if (time < _next_start) {
if (_current_frame < _last_frame) {
// Rewind to beginning.
memset(_cells, 255, padx * pady);
memset(_cells2, 255, padx * pady);
_last_start = -1.0;
_next_start = 0.0;
_frames_read = 0;
_last_frame = 0;
}
nassertr(time >= _next_start, NULL);
while (_next_start <= time) {
_last_start = (_frames_read * 1.0) / _fps;
_frames_read += 1;
_next_start = (_frames_read * 1.0) / _fps;
while (_last_frame <= _current_frame) {
++_last_frame;
for (int y=1; y<pady-1; y++) {
for (int x=1; x<padx-1; x++) {
int tot =
@@ -142,8 +153,6 @@ fetch_buffer(double time) {
}
}
buffer->_begin_time = _last_start;
buffer->_end_time = _next_start;
return buffer;
}
+4 -2
View File
@@ -32,13 +32,15 @@ class EXPCL_PANDA_MOVIES InkblotVideoCursor : public MovieVideoCursor {
virtual ~InkblotVideoCursor();
public:
virtual Buffer *fetch_buffer(double time);
virtual bool set_time(double time, int loop_count);
virtual PT(Buffer) fetch_buffer();
protected:
int _current_frame;
int _last_frame;
unsigned char *_cells;
unsigned char *_cells2;
int _fps;
int _frames_read;
public:
static TypeHandle get_class_type() {
+22 -28
View File
@@ -130,34 +130,6 @@ aborted() const {
return _aborted;
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::last_start
// Access: Published
// Description: Returns the start time of the last frame you read.
//
// MovieVideoCursor streams have variable frame rates. Each
// frame will specify how long it is to be displayed.
// These lengths may not be equal from frame to frame.
////////////////////////////////////////////////////////////////////
INLINE double MovieVideoCursor::
last_start() const {
return _last_start;
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::next_start
// Access: Published
// Description: Returns the start time of the next frame you can read.
//
// MovieVideoCursor streams have variable frame rates. Each
// frame will specify how long it is to be displayed.
// These lengths may not be equal from frame to frame.
////////////////////////////////////////////////////////////////////
INLINE double MovieVideoCursor::
next_start() const {
return _next_start;
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::streaming
// Access: Published
@@ -191,3 +163,25 @@ ready() const {
return _ready;
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::Buffer::Constructor
// Access: Public
// Description:
////////////////////////////////////////////////////////////////////
INLINE MovieVideoCursor::Buffer::
Buffer(size_t block_size) :
_block_size(block_size)
{
_block = (unsigned char *)PANDA_MALLOC_ARRAY(_block_size);
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::Buffer::Destructor
// Access: Public, Virtual
// Description:
////////////////////////////////////////////////////////////////////
INLINE MovieVideoCursor::Buffer::
~Buffer() {
PANDA_FREE_ARRAY(_block);
}
+45 -100
View File
@@ -38,12 +38,9 @@ MovieVideoCursor(MovieVideo *src) :
_can_seek(true),
_can_seek_fast(true),
_aborted(false),
_last_start(-1.0),
_next_start(0.0),
_streaming(false),
_ready(false)
{
_standard_buffer._block = NULL;
}
////////////////////////////////////////////////////////////////////
@@ -53,9 +50,6 @@ MovieVideoCursor(MovieVideo *src) :
////////////////////////////////////////////////////////////////////
MovieVideoCursor::
~MovieVideoCursor() {
if (_standard_buffer._block != NULL) {
PANDA_FREE_ARRAY(_standard_buffer._block);
}
}
////////////////////////////////////////////////////////////////////
@@ -78,16 +72,33 @@ setup_texture(Texture *tex) const {
tex->set_pad_size(fullx - size_x(), fully - size_y());
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::set_time
// Access: Published, Virtual
// Description: Updates the cursor to the indicated time. If
// loop_count >= 1, the time is clamped to the movie's
// length * loop_count. If loop_count <= 0, the time is
// understood to be modulo the movie's length.
// Returns true if a new frame is now available, false
// otherwise. If this returns true, you should
// immediately follow this with exactly *one* call to
// one of the fetch_*() methods.
////////////////////////////////////////////////////////////////////
bool MovieVideoCursor::
set_time(double time, int loop_count) {
return true;
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::fetch_into_bitbucket
// Access: Published, Virtual
// Description: Discards the next video frame. Still sets
// last_start and next_start.
// Description: Discards the next video frame.
//
// See fetch_buffer for more details.
////////////////////////////////////////////////////////////////////
void MovieVideoCursor::
fetch_into_bitbucket(double time) {
fetch_into_bitbucket() {
// This generic implementation is layered on fetch_buffer.
// It will work for any derived class, so it is never necessary to
@@ -95,10 +106,8 @@ fetch_into_bitbucket(double time) {
// implementation, but since this function is rarely used, it
// probably isn't worth the trouble.
Buffer *buffer = fetch_buffer(time);
PT(Buffer) buffer = fetch_buffer();
if (buffer != NULL) {
_last_start = buffer->_begin_time;
_next_start = buffer->_end_time;
release_buffer(buffer);
}
}
@@ -112,7 +121,7 @@ fetch_into_bitbucket(double time) {
// See fetch_buffer for more details.
////////////////////////////////////////////////////////////////////
void MovieVideoCursor::
fetch_into_texture(double time, Texture *t, int page) {
fetch_into_texture(Texture *t, int page) {
static PStatCollector fetch_into_texture_collector("*:Decode Video into Texture");
PStatTimer timer(fetch_into_texture_collector);
@@ -133,15 +142,12 @@ fetch_into_texture(double time, Texture *t, int page) {
unsigned char *data = img.p() + page * t->get_expected_ram_page_size();
Buffer *buffer = fetch_buffer(time);
PT(Buffer) buffer = fetch_buffer();
if (buffer == NULL) {
// No image available.
return;
}
_last_start = buffer->_begin_time;
_next_start = buffer->_end_time;
if (t->get_x_size() == size_x() && t->get_num_components() == get_num_components()) {
memcpy(data, buffer->_block, size_x() * size_y() * get_num_components());
@@ -182,7 +188,7 @@ fetch_into_texture(double time, Texture *t, int page) {
// See fetch_buffer for more details.
////////////////////////////////////////////////////////////////////
void MovieVideoCursor::
fetch_into_texture_alpha(double time, Texture *t, int page, int alpha_src) {
fetch_into_texture_alpha(Texture *t, int page, int alpha_src) {
// This generic implementation is layered on fetch_buffer.
// It will work for any derived class, so it is never necessary to
@@ -197,15 +203,12 @@ fetch_into_texture_alpha(double time, Texture *t, int page, int alpha_src) {
nassertv(page < t->get_z_size());
nassertv((alpha_src >= 0) && (alpha_src <= get_num_components()));
Buffer *buffer = fetch_buffer(time);
PT(Buffer) buffer = fetch_buffer();
if (buffer == NULL) {
// No image available.
return;
}
_last_start = buffer->_begin_time;
_next_start = buffer->_end_time;
t->set_keep_ram_image(true);
PTA_uchar img = t->modify_ram_image();
@@ -248,7 +251,7 @@ fetch_into_texture_alpha(double time, Texture *t, int page, int alpha_src) {
// See fetch_buffer for more details.
////////////////////////////////////////////////////////////////////
void MovieVideoCursor::
fetch_into_texture_rgb(double time, Texture *t, int page) {
fetch_into_texture_rgb(Texture *t, int page) {
// This generic implementation is layered on fetch_buffer.
// It will work for any derived class, so it is never necessary to
@@ -262,15 +265,12 @@ fetch_into_texture_rgb(double time, Texture *t, int page) {
nassertv(t->get_component_width() == 1);
nassertv(page < t->get_z_size());
Buffer *buffer = fetch_buffer(time);
PT(Buffer) buffer = fetch_buffer();
if (buffer == NULL) {
// No image available.
return;
}
_last_start = buffer->_begin_time;
_next_start = buffer->_end_time;
t->set_keep_ram_image(true);
PTA_uchar img = t->modify_ram_image();
@@ -297,13 +297,11 @@ fetch_into_texture_rgb(double time, Texture *t, int page) {
// Function: MovieVideoCursor::fetch_buffer
// Access: Published, Virtual
// Description: Reads the specified video frame and returns it in a
// pre-allocated buffer. The frame's begin and end
// times are stored in _begin_time and _end_time, within
// the buffer. After you have copied the data from the
// buffer, you should call release_buffer() to make the
// space available again to populate the next frame.
// You may not call fetch_buffer() again until you have
// called release_buffer().
// pre-allocated buffer. After you have copied the data
// from the buffer, you should call release_buffer() to
// make the space available again to populate the next
// frame. You may not call fetch_buffer() again until
// you have called release_buffer().
//
// If the movie reports that it can_seek, you may
// also specify a timestamp less than next_start.
@@ -316,39 +314,9 @@ fetch_into_texture_rgb(double time, Texture *t, int page) {
// desired location. Only if can_seek_fast returns
// true can it seek rapidly.
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *MovieVideoCursor::
fetch_buffer(double time) {
Buffer *buffer = get_standard_buffer();
// The following is the implementation of the null video stream, ie,
// a stream of blinking red and blue frames. This method must be
// overridden by the subclass.
buffer->_begin_time = floor(time);
buffer->_end_time = buffer->_begin_time + 1;
int flash = ((int)buffer->_begin_time) & 1;
unsigned char *p = buffer->_block;
int src_width = get_num_components();
for (int y = 0; y < size_y(); ++y) {
for (int x = 0; x < size_x(); ++x) {
if (flash) {
p[0] = 255;
p[1] = 128;
p[2] = 128;
} else {
p[0] = 128;
p[1] = 128;
p[2] = 255;
}
if (src_width == 4) {
p[3] = 255;
}
p += src_width;
}
}
return buffer;
PT(MovieVideoCursor::Buffer) MovieVideoCursor::
fetch_buffer() {
return NULL;
}
////////////////////////////////////////////////////////////////////
@@ -360,8 +328,7 @@ fetch_buffer(double time) {
////////////////////////////////////////////////////////////////////
void MovieVideoCursor::
release_buffer(Buffer *buffer) {
nassertv(buffer == &_standard_buffer);
nassertv(buffer->_begin_time == _last_start && buffer->_end_time == _next_start);
nassertv(buffer == _standard_buffer);
}
////////////////////////////////////////////////////////////////////
@@ -374,43 +341,21 @@ release_buffer(Buffer *buffer) {
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *MovieVideoCursor::
get_standard_buffer() {
if (_standard_buffer._block == NULL) {
_standard_buffer._block_size = size_x() * size_y() * get_num_components();
_standard_buffer._block = (unsigned char *)PANDA_MALLOC_ARRAY(_standard_buffer._block_size);
_standard_buffer._begin_time = -1.0;
_standard_buffer._end_time = 0.0;
if (_standard_buffer == NULL) {
_standard_buffer = make_new_buffer();
}
return &_standard_buffer;
return _standard_buffer;
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::internal_alloc_buffer
// Access: Protected
// Function: MovieVideoCursor::make_new_buffer
// Access: Protected, Virtual
// Description: May be called by a derived class to allocate a new
// Buffer object. The caller is responsible for
// eventually passing this object to
// internal_free_buffer().
// Buffer object.
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *MovieVideoCursor::
internal_alloc_buffer() {
Buffer *buffer = new Buffer;
buffer->_block_size = size_x() * size_y() * get_num_components();
buffer->_block = (unsigned char *)PANDA_MALLOC_ARRAY(buffer->_block_size);
buffer->_begin_time = -1.0;
buffer->_end_time = 0.0;
return buffer;
}
////////////////////////////////////////////////////////////////////
// Function: MovieVideoCursor::internal_free_buffer
// Access: Protected
// Description: Frees a Buffer object allocated via
// internal_alloc_buffer().
////////////////////////////////////////////////////////////////////
void MovieVideoCursor::
internal_free_buffer(Buffer *buffer) {
PANDA_FREE_ARRAY(buffer->_block);
delete buffer;
PT(MovieVideoCursor::Buffer) MovieVideoCursor::
make_new_buffer() {
return new Buffer(size_x() * size_y() * get_num_components());
}
////////////////////////////////////////////////////////////////////
+13 -15
View File
@@ -53,31 +53,31 @@ PUBLISHED:
INLINE bool can_seek() const;
INLINE bool can_seek_fast() const;
INLINE bool aborted() const;
INLINE double last_start() const;
INLINE double next_start() const;
INLINE bool ready() const;
INLINE bool streaming() const;
void setup_texture(Texture *tex) const;
virtual void fetch_into_bitbucket(double time);
virtual void fetch_into_texture(double time, Texture *t, int page);
virtual void fetch_into_texture_rgb(double time, Texture *t, int page);
virtual void fetch_into_texture_alpha(double time, Texture *t, int page, int alpha_src);
virtual bool set_time(double time, int loop_count);
virtual void fetch_into_bitbucket();
virtual void fetch_into_texture(Texture *t, int page);
virtual void fetch_into_texture_rgb(Texture *t, int page);
virtual void fetch_into_texture_alpha(Texture *t, int page, int alpha_src);
public:
class Buffer : public MemoryBase {
class Buffer : public ReferenceCount {
public:
INLINE Buffer(size_t block_size);
INLINE virtual ~Buffer();
unsigned char *_block;
size_t _block_size;
double _begin_time;
double _end_time;
};
virtual Buffer *fetch_buffer(double time);
virtual PT(Buffer) fetch_buffer();
virtual void release_buffer(Buffer *buffer);
protected:
Buffer *get_standard_buffer();
Buffer *internal_alloc_buffer();
void internal_free_buffer(Buffer *buffer);
virtual PT(Buffer) make_new_buffer();
protected:
PT(MovieVideo) _source;
@@ -88,12 +88,10 @@ protected:
bool _can_seek;
bool _can_seek_fast;
bool _aborted;
double _last_start;
double _next_start;
bool _streaming;
bool _ready;
Buffer _standard_buffer;
PT(Buffer) _standard_buffer;
public:
virtual void write_datagram(BamWriter *manager, Datagram &dg);
+3 -5
View File
@@ -34,8 +34,6 @@ WebcamVideoCursorOpenCV(WebcamVideoOpenCV *src) : MovieVideoCursor(src) {
_can_seek = false;
_can_seek_fast = false;
_aborted = false;
_last_start = -1.0;
_next_start = 0.0;
_streaming = true;
_ready = false;
@@ -65,13 +63,13 @@ WebcamVideoCursorOpenCV::
// Access: Published, Virtual
// Description:
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *WebcamVideoCursorOpenCV::
fetch_buffer(double time) {
PT(MovieVideoCursor::Buffer) WebcamVideoCursorOpenCV::
fetch_buffer() {
if (!_ready) {
return NULL;
}
Buffer *buffer = get_standard_buffer();
PT(Buffer) buffer = get_standard_buffer();
unsigned char *dest = buffer->_block;
int num_components = get_num_components();
nassertr(num_components == 3, NULL);
+1 -1
View File
@@ -31,7 +31,7 @@ class WebcamVideoCursorOpenCV : public MovieVideoCursor {
public:
WebcamVideoCursorOpenCV(WebcamVideoOpenCV *src);
virtual ~WebcamVideoCursorOpenCV();
virtual Buffer *fetch_buffer(double time);
virtual PT(Buffer) fetch_buffer();
private:
bool get_frame_data(const unsigned char *&r,
+3 -5
View File
@@ -137,8 +137,6 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
_can_seek = false;
_can_seek_fast = false;
_aborted = false;
_last_start = -1.0;
_next_start = 0.0;
_streaming = true;
_ready = false;
_format = (struct v4l2_format *) malloc(sizeof(struct v4l2_format));
@@ -312,13 +310,13 @@ WebcamVideoCursorV4L::
// Access: Published, Virtual
// Description:
////////////////////////////////////////////////////////////////////
MovieVideoCursor::Buffer *WebcamVideoCursorV4L::
fetch_buffer(double time) {
PT(MovieVideoCursor::Buffer) WebcamVideoCursorV4L::
fetch_buffer() {
if (!_ready) {
return NULL;
}
Buffer *buffer = get_standard_buffer();
PT(Buffer) buffer = get_standard_buffer();
unsigned char *block = buffer->_block;
struct v4l2_buffer vbuf;
memset(&vbuf, 0, sizeof vbuf);
+1 -1
View File
@@ -37,7 +37,7 @@ class WebcamVideoCursorV4L : public MovieVideoCursor {
public:
WebcamVideoCursorV4L(WebcamVideoV4L *src);
virtual ~WebcamVideoCursorV4L();
virtual Buffer *fetch_buffer(double time);
virtual PT(Buffer) fetch_buffer();
private:
int _fd;