2.8 branch: This adds the new ffmpeg plugin, up to a point. Further changes look messy, but will try to merge them. Revisions merged in this commit: 9816-9818, 9826-9827, 9837-9843, 9847, 9850, 9854, 9856-9857, 9860-9861, 9865, 9869, and 9885.

This commit is contained in:
Paul MARTZ
2010-03-17 17:41:14 +00:00
parent cfa9c3682f
commit bdcba7220c
30 changed files with 3299 additions and 4 deletions

View File

@@ -305,7 +305,9 @@ FIND_PACKAGE(CURL)
FIND_PACKAGE(ITK)
FIND_PACKAGE(LibVNCServer)
FIND_PACKAGE(OurDCMTK)
FIND_PACKAGE(OpenAL)
FIND_PACKAGE(XUL)
FIND_PACKAGE(FFmpeg)
#use pkg-config to find various modues
INCLUDE(FindPkgConfig OPTIONAL)

View File

@@ -0,0 +1,44 @@
# Locate ffmpeg
# This module defines
# FFMPEG_LIBRARIES
# FFMPEG_FOUND, if false, do not try to link to ffmpeg
# FFMPEG_INCLUDE_DIRS, where to find the headers
# FFMPEG_LIBRARY_DIRS, where to find the libraries
#
# $FFMPEG_DIR is an environment variable that would
# correspond to the ./configure --prefix=$FFMPEG_DIR
#
# Created by Robert Osfield.

# Use pkg-config to find the individual ffmpeg modules
# (libavformat, libavdevice, libavcodec, libavutil, libswscale).
INCLUDE(FindPkgConfig OPTIONAL)

IF(PKG_CONFIG_FOUND)

    INCLUDE(FindPkgConfig)

    pkg_check_modules(FFMPEG_LIBAVFORMAT libavformat)
    pkg_check_modules(FFMPEG_LIBAVDEVICE libavdevice)
    pkg_check_modules(FFMPEG_LIBAVCODEC libavcodec)
    pkg_check_modules(FFMPEG_LIBAVUTIL libavutil)
    pkg_check_modules(FFMPEG_LIBSWSCALE libswscale)

ENDIF(PKG_CONFIG_FOUND)

# FFMPEG_FOUND is only set to YES when the four mandatory modules are present;
# libswscale stays optional and is checked separately by the plugin's CMakeLists.
SET(FFMPEG_FOUND "NO")

IF (FFMPEG_LIBAVFORMAT_FOUND AND FFMPEG_LIBAVDEVICE_FOUND AND FFMPEG_LIBAVCODEC_FOUND AND FFMPEG_LIBAVUTIL_FOUND)

    SET(FFMPEG_FOUND "YES")

    # libavformat's reported dirs are used as the representative set.
    SET(FFMPEG_INCLUDE_DIRS ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS})
    SET(FFMPEG_LIBRARY_DIRS ${FFMPEG_LIBAVFORMAT_LIBRARY_DIRS})
    SET(FFMPEG_LIBRARIES
        ${FFMPEG_LIBAVFORMAT_LIBRARIES}
        ${FFMPEG_LIBAVDEVICE_LIBRARIES}
        ${FFMPEG_LIBAVCODEC_LIBRARIES}
        ${FFMPEG_LIBAVUTIL_LIBRARIES})

ENDIF(FFMPEG_LIBAVFORMAT_FOUND AND FFMPEG_LIBAVDEVICE_FOUND AND FFMPEG_LIBAVCODEC_FOUND AND FFMPEG_LIBAVUTIL_FOUND)

View File

@@ -1,7 +1,9 @@
#this file is automatically generated
# INCLUDE_DIRECTORIES( ${OPENAL_INCLUDE_DIR} )
SET(TARGET_SRC osgmovie.cpp )
SET(TARGET_ADDED_LIBRARIES osgGA )
# SET(TARGET_EXTERNAL_LIBRARIES ${OPENAL_LIBRARY} alut)
#### end var setup ###
SETUP_EXAMPLE(osgmovie)

View File

@@ -320,6 +320,30 @@ osg::Geometry* myCreateTexturedQuadGeometry(const osg::Vec3& pos,float width,flo
}
}
// Example osg::AudioSink implementation used by the --OpenAL code path below.
// It produces no actual sound output; it records the playing state and logs
// the stream's properties, showing how a real audio backend would attach.
class CustomAudioSink : public osg::AudioSink
{
    public:

        CustomAudioSink(osg::AudioStream* audioStream):
            _playing(false),
            _audioStream(audioStream) {}

        // Called by the stream when audio output should begin; logs the
        // stream's frequency, channel count and sample format.
        virtual void startPlaying()
        {
            _playing = true;
            osg::notify(osg::NOTICE)<<"CustomAudioSink()::startPlaying()"<<std::endl;
            osg::notify(osg::NOTICE)<<" audioFrequency()="<<_audioStream->audioFrequency()<<std::endl;
            osg::notify(osg::NOTICE)<<" audioNbChannels()="<<_audioStream->audioNbChannels()<<std::endl;
            osg::notify(osg::NOTICE)<<" audioSampleFormat()="<<_audioStream->audioSampleFormat()<<std::endl;
        }

        virtual bool playing() const { return _playing; }

        bool _playing;

        // observer_ptr so the sink does not keep the stream alive — the stream
        // owns the sink (see setAudioSink below), avoiding a reference cycle.
        osg::observer_ptr<osg::AudioStream> _audioStream;
};
int main(int argc, char** argv)
{
// use an ArgumentParser object to manage the program arguments.
@@ -425,6 +449,9 @@ int main(int argc, char** argv)
osg::Vec3 bottomright = pos;
bool xyPlane = fullscreen;
bool useOpenALAudio = false;
while(arguments.read("--OpenAL")) { useOpenALAudio = true; }
for(int i=1;i<arguments.argc();++i)
{
@@ -432,7 +459,19 @@ int main(int argc, char** argv)
{
osg::Image* image = osgDB::readImageFile(arguments[i]);
osg::ImageStream* imagestream = dynamic_cast<osg::ImageStream*>(image);
if (imagestream) imagestream->play();
if (imagestream)
{
osg::ImageStream::AudioStreams& audioStreams = imagestream->getAudioStreams();
if (useOpenALAudio && !audioStreams.empty())
{
osg::AudioStream* audioStream = audioStreams[0].get();
osg::notify(osg::NOTICE)<<"AudioStream read ["<<audioStream->getName()<<"]"<<std::endl;
audioStream->setAudioSink(new CustomAudioSink(audioStream));
}
imagestream->play();
}
if (image)
{

80
include/osg/AudioStream Normal file
View File

@@ -0,0 +1,80 @@
/* -*-c++-*- OpenSceneGraph - Copyright (C) 1998-2006 Robert Osfield
*
* This library is open source and may be redistributed and/or modified under
* the terms of the OpenSceneGraph Public License (OSGPL) version 0.0 or
* (at your option) any later version. The full license is in LICENSE file
* included with this distribution, and on the openscenegraph.org website.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* OpenSceneGraph Public License for more details.
*/
#ifndef OSG_AUDIOSTREAM
#define OSG_AUDIOSTREAM 1
#include <osg/Image>
namespace osg {
/** Pure virtual AudioSink base class that is used to connect the audio system with AudioStreams. */
class OSG_EXPORT AudioSink : public osg::Object
{
    public:

        AudioSink();

        /** Start pulling and playing audio data from the attached stream. */
        virtual void startPlaying() = 0;

        /** Whether the sink is currently playing. */
        virtual bool playing() const = 0;

        /** Output latency in seconds, used for audio/video synchronization. */
        virtual double getDelay() const { return _delay; }
        virtual void setDelay(const double delay) { _delay = delay; }

        // This class lives in the core osg library; the previous values
        // ("osgFFmpeg"/"AudioSinkInterface") were copy-pasted from the
        // plugin's AudioSinkInterface and misreported the type.
        virtual const char * libraryName() const { return "osg"; }
        virtual const char * className() const { return "AudioSink"; }

    private:

        // Sinks are deliberately not clonable: cloneType()/clone() return 0.
        virtual AudioSink * cloneType() const { return 0; }
        virtual AudioSink * clone(const osg::CopyOp &) const { return 0; }

        double _delay;
};
/** Pure virtual AudioStream base class. Subclasses provide mechanism for reading/generating audio data,
  * which an attached AudioSink (the audio backend) then pulls via consumeAudioBuffer(). */
class OSG_EXPORT AudioStream : public osg::Object
{
    public:

        AudioStream();

        /** Copy constructor using CopyOp to manage deep vs shallow copy. */
        AudioStream(const AudioStream& audio,const CopyOp& copyop=CopyOp::SHALLOW_COPY);

        virtual bool isSameKindAs(const Object* obj) const { return dynamic_cast<const AudioStream*>(obj)!=0; }
        virtual const char* libraryName() const { return "osg"; }
        virtual const char* className() const { return "AudioStream"; }

        /** Attach the audio backend that will consume this stream's data. */
        virtual void setAudioSink(osg::AudioSink* audio_sink) = 0;

        /** Fill `buffer` with `size` bytes of decoded audio data. */
        virtual void consumeAudioBuffer(void * const buffer, const size_t size) = 0;

        /** True when a usable audio stream is present. */
        virtual bool audioStream() const = 0;

        virtual int audioFrequency() const = 0;
        virtual int audioNbChannels() const = 0;

        /** Sample encodings reportable by audioSampleFormat(). */
        enum SampleFormat
        {
            SAMPLE_FORMAT_U8,
            SAMPLE_FORMAT_S16,
            SAMPLE_FORMAT_S24,
            SAMPLE_FORMAT_S32,
            SAMPLE_FORMAT_F32
        };

        virtual SampleFormat audioSampleFormat() const = 0;
};
} // namespace
#endif

View File

@@ -15,6 +15,7 @@
#define OSG_IMAGESTREAM 1
#include <osg/Image>
#include <osg/AudioStream>
namespace osg {
@@ -86,6 +87,12 @@ class OSG_EXPORT ImageStream : public Image
virtual void setVolume(float) {}
virtual float getVolume() const { return 0.0f; }
typedef std::vector< osg::ref_ptr<osg::AudioStream> > AudioStreams;
void setAudioStreams(const AudioStreams& asl) { _audioStreams = asl; }
AudioStreams& getAudioStreams() { return _audioStreams; }
const AudioStreams& getAudioStreams() const { return _audioStreams; }
protected:
@@ -95,6 +102,8 @@ class OSG_EXPORT ImageStream : public Image
StreamStatus _status;
LoopingMode _loopingMode;
AudioStreams _audioStreams;
};
} // namespace

30
src/osg/AudioStream.cpp Normal file
View File

@@ -0,0 +1,30 @@
/* -*-c++-*- OpenSceneGraph - Copyright (C) 1998-2006 Robert Osfield
*
* This library is open source and may be redistributed and/or modified under
* the terms of the OpenSceneGraph Public License (OSGPL) version 0.0 or
* (at your option) any later version. The full license is in LICENSE file
* included with this distribution, and on the openscenegraph.org website.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* OpenSceneGraph Public License for more details.
*/
#include <osg/AudioStream>
using namespace osg;
// Default-construct a sink with zero reported latency.
AudioSink::AudioSink() :
    _delay(0.0)
{
}

AudioStream::AudioStream()
{
}

// AudioStream holds no data of its own beyond the osg::Object base,
// so only the base needs copying.
AudioStream::AudioStream(const AudioStream& audio,const CopyOp& copyop):
    osg::Object(audio, copyop)
{
}

View File

@@ -25,6 +25,7 @@ SET(LIB_PUBLIC_HEADERS
${HEADER_PATH}/ApplicationUsage
${HEADER_PATH}/ArgumentParser
${HEADER_PATH}/Array
${HEADER_PATH}/AudioStream
${HEADER_PATH}/AutoTransform
${HEADER_PATH}/Billboard
${HEADER_PATH}/BlendColor
@@ -194,6 +195,7 @@ ADD_LIBRARY(${LIB_NAME}
ApplicationUsage.cpp
ArgumentParser.cpp
Array.cpp
AudioStream.cpp
AutoTransform.cpp
Billboard.cpp
BlendColor.cpp

View File

@@ -32,7 +32,8 @@ ImageStream::ImageStream():
ImageStream::ImageStream(const ImageStream& image,const CopyOp& copyop):
Image(image,copyop),
_status(image._status),
_loopingMode(image._loopingMode)
_loopingMode(image._loopingMode),
_audioStreams(image._audioStreams)
{
}

View File

@@ -205,6 +205,14 @@ IF(APPLE)
ADD_SUBDIRECTORY(imageio)
ENDIF(APPLE)
IF(FFMPEG_FOUND)
ADD_SUBDIRECTORY(ffmpeg)
ENDIF(FFMPEG_FOUND)
IF(OPENAL_FOUND)
ADD_SUBDIRECTORY(OpenAL)
ENDIF(OPENAL_FOUND)
IF(QUICKTIME_FOUND)
ADD_SUBDIRECTORY(quicktime)
ENDIF(QUICKTIME_FOUND)

View File

@@ -0,0 +1,40 @@
#ifndef HEADER_GUARD_OSGFFMPEG_AUDIO_SINK_INTERFACE_H
#define HEADER_GUARD_OSGFFMPEG_AUDIO_SINK_INTERFACE_H
#include <osg/Object>
namespace osgFFmpeg
{
// Abstract interface for an audio output backend used by the ffmpeg plugin.
// NOTE(review): this mirrors osg::AudioSink (include/osg/AudioStream) almost
// verbatim — presumably kept for plugin-internal use; consider consolidating
// on the core osg class.
class AudioSinkInterface : public osg::Object
{
    public:

        AudioSinkInterface() :
            m_delay(0.0) { }

        // Start pulling audio data from the attached stream.
        virtual void startPlaying() = 0;

        // True while output is active.
        virtual bool playing() const = 0;

        // Output latency in seconds, used for audio/video synchronization.
        virtual double getDelay() const { return m_delay; }
        virtual void setDelay(const double delay) { m_delay = delay; }

        virtual const char * libraryName() const { return "osgFFmpeg"; }
        virtual const char * className() const { return "AudioSinkInterface"; }

    private:

        // Sinks are deliberately not clonable: cloneType()/clone() return 0.
        virtual AudioSinkInterface * cloneType() const { return 0; }
        virtual AudioSinkInterface * clone(const osg::CopyOp &) const { return 0; }

        double m_delay;
};
}
#endif // HEADER_GUARD_OSGFFMPEG_AUDIO_SINK_INTERFACE_H

View File

@@ -0,0 +1,322 @@
#ifndef HEADER_GUARD_OSGFFMPEG_BOUNDED_MESSAGE_QUEUE_H
#define HEADER_GUARD_OSGFFMPEG_BOUNDED_MESSAGE_QUEUE_H
#include <OpenThreads/Condition>
#include <OpenThreads/Mutex>
#include <OpenThreads/ScopedLock>
#include <cassert>
#include <algorithm>
#include <vector>
namespace osgFFmpeg {
/** Fixed-capacity, thread-safe FIFO used to pass packets between the demuxer
  * and the decoder threads.
  *
  * Storage is a circular buffer of `capacity` pre-constructed slots.
  * Producers block (or time out / fail) while the queue is full, consumers
  * while it is empty. All public operations take m_mutex; m_not_empty and
  * m_not_full are the matching condition variables. */
template <class T>
class BoundedMessageQueue
{
public:

    typedef T value_type;
    typedef size_t size_type;

    explicit BoundedMessageQueue(size_type capacity);
    ~BoundedMessageQueue();

    // Discard all queued items.
    void clear();

    // Pop every queued item and invoke `destructor` on it
    // (used to release AVPacket data).
    template <class Destructor>
    void flush(const Destructor destructor);

    // Producer side: blocking / non-blocking / bounded-wait insertion.
    void push(const value_type & value);
    bool tryPush(const value_type & value);
    bool timedPush(const value_type & value, unsigned long ms);

    // Consumer side: blocking / non-blocking / bounded-wait extraction.
    value_type pop();
    value_type tryPop(bool & is_empty);
    value_type timedPop(bool & is_empty, unsigned long ms);

private:

    // Non-copyable: declared but not defined.
    BoundedMessageQueue(const BoundedMessageQueue &);
    BoundedMessageQueue & operator = (const BoundedMessageQueue &);

    typedef std::vector<T> Buffer;
    typedef OpenThreads::Condition Condition;
    typedef OpenThreads::Mutex Mutex;
    typedef OpenThreads::ScopedLock<Mutex> ScopedLock;

    // The following helpers assume m_mutex is held by the caller.
    bool isFull() const;
    bool isEmpty() const;
    void unsafePush(const value_type & value);
    value_type unsafePop();

    Buffer m_buffer;        // fixed-size circular storage
    size_type m_begin;      // index of the oldest element
    size_type m_end;        // index one past the newest element (wraps)
    size_type m_size;       // number of queued elements

    Mutex m_mutex;
    Condition m_not_empty;  // signalled after each successful push
    Condition m_not_full;   // signalled after each successful pop
};
// Pre-size the circular buffer: slots are default-constructed once and then
// overwritten in place by unsafePush().
template <class T>
BoundedMessageQueue<T>::BoundedMessageQueue(const size_type capacity) :
    m_buffer(capacity),
    m_begin(0),
    m_end(0),
    m_size(0)
{
}

template <class T>
BoundedMessageQueue<T>::~BoundedMessageQueue()
{
}
/** Discard all queued items and wake blocked producers.
  * The queue remains usable (at full capacity) afterwards. */
template <class T>
void BoundedMessageQueue<T>::clear()
{
    {
        ScopedLock lock(m_mutex);

        // Overwrite the slots with default-constructed values so resources
        // held by queued items are released, but keep the vector at full
        // size. The previous m_buffer.clear() shrank the vector to zero
        // elements, which made isFull() (m_size == m_buffer.size())
        // permanently true and dead-locked every subsequent push(); it also
        // left unsafePush() indexing past the end of the vector.
        std::fill(m_buffer.begin(), m_buffer.end(), value_type());

        m_begin = 0;
        m_end = 0;
        m_size = 0;
    }

    m_not_full.broadcast();
}
/** Pop every queued item and pass it to `destructor`, then reset the queue
  * indices; wakes all blocked producers once done. */
template <class T>
template <class Destructor>
void BoundedMessageQueue<T>::flush(const Destructor destructor)
{
    {
        ScopedLock lock(m_mutex);

        while (! isEmpty())
        {
            value_type value = unsafePop();
            destructor(value);
        }

        m_begin = 0;
        m_end = 0;
        m_size = 0;
    }

    m_not_full.broadcast();
}
/** Blocking insertion: waits on m_not_full until a slot frees up. */
template <class T>
void BoundedMessageQueue<T>::push(const value_type & value)
{
    {
        ScopedLock lock(m_mutex);

        while (isFull())
            m_not_full.wait(&m_mutex);

        unsafePush(value);
    }

    // Signal outside the lock so a woken consumer doesn't immediately block.
    m_not_empty.signal();
}

/** Non-blocking insertion: returns false immediately if the queue is full. */
template <class T>
bool BoundedMessageQueue<T>::tryPush(const value_type & value)
{
    {
        ScopedLock lock(m_mutex);

        if (isFull())
            return false;

        unsafePush(value);
    }

    m_not_empty.signal();
    return true;
}

/** Bounded-wait insertion: waits at most `ms` milliseconds for a free slot. */
template <class T>
bool BoundedMessageQueue<T>::timedPush(const value_type & value, const unsigned long ms)
{
    // We don't wait in a loop to avoid an infinite loop (as the ms timeout would not be decremented).
    // This means that timedPush() could return false before the timeout has been hit.

    {
        ScopedLock lock(m_mutex);

        if (isFull())
            m_not_full.wait(&m_mutex, ms);

        if (isFull())
            return false;

        unsafePush(value);
    }

    m_not_empty.signal();
    return true;
}
/** Blocking extraction: waits on m_not_empty until an item is available. */
template <class T>
typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::pop()
{
    value_type value;

    {
        ScopedLock lock(m_mutex);

        while (isEmpty())
            m_not_empty.wait(&m_mutex);

        value = unsafePop();
    }

    m_not_full.signal();
    return value;
}

/** Non-blocking extraction: sets `is_empty` and returns a default-constructed
  * value when nothing is queued. */
template <class T>
typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::tryPop(bool & is_empty)
{
    value_type value;

    {
        ScopedLock lock(m_mutex);

        is_empty = isEmpty();

        if (is_empty)
            return value_type();

        value = unsafePop();
    }

    m_not_full.signal();
    return value;
}

/** Bounded-wait extraction: waits at most `ms` milliseconds for an item. */
template <class T>
typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::timedPop(bool & is_empty, const unsigned long ms)
{
    value_type value;

    {
        ScopedLock lock(m_mutex);

        // We don't wait in a loop to avoid an infinite loop (as the ms timeout would not be decremented).
        // This means that timedPop() could return with (is_empty = true) before the timeout has been hit.

        if (isEmpty())
            m_not_empty.wait(&m_mutex, ms);

        is_empty = isEmpty();

        if (is_empty)
            return value_type();

        value = unsafePop();
    }

    m_not_full.signal();
    return value;
}
// Both predicates assume m_mutex is held by the caller.
template <class T>
inline bool BoundedMessageQueue<T>::isFull() const
{
    return m_size == m_buffer.size();
}

template <class T>
inline bool BoundedMessageQueue<T>::isEmpty() const
{
    return m_size == 0;
}
/** Append `value` at the write index and advance it with wrap-around.
  * Caller must hold m_mutex; the queue must not be full. */
template <class T>
inline void BoundedMessageQueue<T>::unsafePush(const value_type & value)
{
    assert(! isFull());

    m_buffer[m_end] = value;
    m_end = (m_end + 1) % m_buffer.size();
    ++m_size;
}
/** Remove and return the oldest element, advancing the read index with
  * wrap-around. Caller must hold m_mutex; the queue must not be empty. */
template <class T>
inline typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::unsafePop()
{
    assert(! isEmpty());

    value_type value = m_buffer[m_begin];
    m_begin = (m_begin + 1) % m_buffer.size();
    --m_size;

    return value;
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_BOUNDED_MESSAGE_QUEUE_H

View File

@@ -0,0 +1,66 @@
# INCLUDE_DIRECTORIES( ${FFMPEG_INCLUDE_DIRS} )

# Note: the libavutil entries previously pointed at .../libavcodec (copy-paste
# error); they now correctly reference the libavutil subdirectory.
IF (FFMPEG_LIBSWSCALE_FOUND)

    INCLUDE_DIRECTORIES(
        ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS} ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS}/libavformat ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS}/ffmpeg
        ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS} ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS}/libavdevice ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS}/ffmpeg
        ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS} ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS}/libavcodec ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS}/ffmpeg
        ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS} ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS}/libavutil ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS}/ffmpeg
        ${FFMPEG_LIBSWSCALE_INCLUDE_DIRS} ${FFMPEG_LIBSWSCALE_INCLUDE_DIRS}/libswscale ${FFMPEG_LIBSWSCALE_INCLUDE_DIRS}/ffmpeg
    )

    ADD_DEFINITIONS(-DUSE_SWSCALE)

    LINK_DIRECTORIES(${FFMPEG_LIBRARY_DIRS})

    SET(TARGET_EXTERNAL_LIBRARIES ${FFMPEG_LIBRARIES} ${FFMPEG_LIBSWSCALE_LIBRARIES})

ELSE(FFMPEG_LIBSWSCALE_FOUND)

    INCLUDE_DIRECTORIES(
        ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS} ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS}/libavformat ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS}/ffmpeg
        ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS} ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS}/libavdevice ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS}/ffmpeg
        ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS} ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS}/libavcodec ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS}/ffmpeg
        ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS} ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS}/libavutil ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS}/ffmpeg
    )

    LINK_DIRECTORIES(${FFMPEG_LIBRARY_DIRS})

    SET(TARGET_EXTERNAL_LIBRARIES ${FFMPEG_LIBRARIES} )

ENDIF()

# MESSAGE("FFMPEG_LIBAVFORMAT_INCLUDE_DIRS = " ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS} )
# MESSAGE("FFMPEG_LIBAVDEVICE_INCLUDE_DIRS = " ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS} )
# MESSAGE("FFMPEG_LIBAVCODEC_INCLUDE_DIRS = " ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS} )
# MESSAGE("FFMPEG_LIBAVUTIL_INCLUDE_DIRS = " ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS} )
# MESSAGE("FFMPEG_LIBRARIES = " ${FFMPEG_LIBRARIES} )

SET(TARGET_SRC
    FFmpegClocks.cpp
    FFmpegDecoderAudio.cpp
    FFmpegDecoder.cpp
    FFmpegDecoderVideo.cpp
    FFmpegImageStream.cpp
    FFmpegAudioStream.cpp
    ReaderWriterFFmpeg.cpp
)

SET(TARGET_H
    BoundedMessageQueue.hpp
    FFmpegClocks.hpp
    FFmpegDecoderAudio.hpp
    FFmpegDecoder.hpp
    FFmpegDecoderVideo.hpp
    FFmpegHeaders.hpp
    FFmpegPacket.hpp
    FFmpegImageStream.hpp
    FFmpegAudioStream.hpp
    MessageQueue.hpp
)

#### end var setup  ###
SETUP_PLUGIN(ffmpeg ffmpeg)

View File

@@ -0,0 +1,81 @@
#include "FFmpegAudioStream.hpp"
#include <OpenThreads/ScopedLock>
#include <osg/Notify>
#include "FFmpegDecoder.hpp"
#include "MessageQueue.hpp"
#include <memory>
namespace osgFFmpeg {
// Wrap a decoder; the stream forwards every query to the decoder's
// FFmpegDecoderAudio.
FFmpegAudioStream::FFmpegAudioStream(FFmpegDecoder* decoder):
    m_decoder(decoder)
{
}
// Copy constructor (required by META_Object). Shares the decoder with the
// source stream — consistent with SHALLOW_COPY semantics. Previously
// m_decoder was left null, so any forwarding method on a copied stream
// (setAudioSink, consumeAudioBuffer, ...) dereferenced a null pointer.
FFmpegAudioStream::FFmpegAudioStream(const FFmpegAudioStream & audio, const osg::CopyOp & copyop) :
    osg::AudioStream(audio, copyop),
    m_decoder(audio.m_decoder)
{
}
FFmpegAudioStream::~FFmpegAudioStream()
{
    // Detach the audio sink first to avoid destruction-order issues.
    setAudioSink(0);
}
// Hand the sink over to the audio decoder, which drives playback;
// passing 0 detaches the current sink.
void FFmpegAudioStream::setAudioSink(osg::AudioSink* audio_sink)
{
    osg::notify(osg::NOTICE)<<"FFmpegAudioStream::setAudioSink( "<<audio_sink<<")"<<std::endl;
    m_decoder->audio_decoder().setAudioSink(audio_sink);
}
// Fill `buffer` with `size` bytes of decoded samples, pulled from the
// audio decoder.
void FFmpegAudioStream::consumeAudioBuffer(void * const buffer, const size_t size)
{
    m_decoder->audio_decoder().fillBuffer(buffer, size);
}
// Total media duration in seconds, taken from the decoder.
double FFmpegAudioStream::duration() const
{
    return m_decoder->duration();
}

// True when the decoder holds a valid audio codec context, i.e. the media
// actually contains a usable audio stream.
bool FFmpegAudioStream::audioStream() const
{
    return m_decoder->audio_decoder().validContext();
}

int FFmpegAudioStream::audioFrequency() const
{
    return m_decoder->audio_decoder().frequency();
}

int FFmpegAudioStream::audioNbChannels() const
{
    return m_decoder->audio_decoder().nbChannels();
}

osg::AudioStream::SampleFormat FFmpegAudioStream::audioSampleFormat() const
{
    return m_decoder->audio_decoder().sampleFormat();
}
} // namespace osgFFmpeg

View File

@@ -0,0 +1,43 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_AUDIO_STREAM_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_AUDIO_STREAM_H

#include <osg/AudioStream>

#include "FFmpegDecoder.hpp"

namespace osgFFmpeg
{

    /** osg::AudioStream implementation backed by the ffmpeg decoder.
      * Every call is forwarded to the decoder's FFmpegDecoderAudio. */
    class FFmpegAudioStream : public osg::AudioStream
    {
    public:

        FFmpegAudioStream(FFmpegDecoder* decoder=0);
        FFmpegAudioStream(const FFmpegAudioStream & audio, const osg::CopyOp & copyop = osg::CopyOp::SHALLOW_COPY);

        META_Object(osgFFmpeg, FFmpegAudioStream);

        // Attach the audio backend (0 detaches the current sink).
        virtual void setAudioSink(osg::AudioSink* audio_sink);

        // Pull `size` bytes of decoded samples into `buffer`.
        void consumeAudioBuffer(void * const buffer, const size_t size);

        // Stream properties, forwarded from the decoder.
        bool audioStream() const;
        int audioFrequency() const;
        int audioNbChannels() const;
        osg::AudioStream::SampleFormat audioSampleFormat() const;

        double duration() const;

    private:

        virtual ~FFmpegAudioStream();

        osg::ref_ptr<FFmpegDecoder> m_decoder;
    };

}

#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_AUDIO_STREAM_H

View File

@@ -0,0 +1,217 @@
#include "FFmpegClocks.hpp"
#include <cmath>
#include <algorithm>
// DEBUG
//#include <iostream>
namespace osgFFmpeg {
namespace
{

    // Audio/video sync thresholds in seconds.
    // NOTE(review): not referenced by the visible code in this file;
    // presumably kept for future sync-correction logic.
    const double AV_SYNC_THRESHOLD = 0.01;
    const double AV_NOSYNC_THRESHOLD = 10.0;

    // The extra parentheses around std::min/std::max prevent expansion of
    // the Windows min/max macros.
    inline double clamp(const double value, const double min, const double max)
    {
        return (std::min)((std::max)(value, min), max);
    }

}
FFmpegClocks::FFmpegClocks() :
    m_video_clock(0),
    m_start_time(0),
    m_last_frame_delay(0.040),   // 0.040s = one frame at 25 fps, fallback inter-frame delay
    m_last_frame_pts(0),
    m_last_actual_delay(0),
    m_frame_time(0),
    m_audio_buffer_end_pts(0),
    m_audio_delay(0.0),
    m_audio_disabled(false),
    m_rewind(false)
{
}
// Re-initialise every clock to `start_time` (media start time, in seconds).
void FFmpegClocks::reset(const double start_time)
{
    ScopedLock lock(m_mutex);

    m_video_clock = start_time;
    m_start_time = start_time;

    // Fallback inter-frame delay: one frame at 25 fps.
    m_last_frame_delay = 0.040;
    m_last_frame_pts = start_time - m_last_frame_delay;
    m_frame_time = start_time;

    m_audio_buffer_end_pts = start_time;
    m_audio_timer.setStartTick();
}
// Called when the audio stream has looped back to the media start.
void FFmpegClocks::rewindAudio()
{
    ScopedLock lock(m_mutex);

    m_audio_buffer_end_pts = m_start_time;
    m_audio_timer.setStartTick();

    // m_rewind is toggled by each of rewindAudio()/rewindVideo(): it is true
    // only while exactly one of the two sides has rewound, i.e. while the
    // clocks are transiently inconsistent (see videoRefreshSchedule()).
    m_rewind = ! m_rewind;
}

// Called when the video stream has looped back to the media start.
void FFmpegClocks::rewindVideo()
{
    ScopedLock lock(m_mutex);

    // NOTE(review): when audio is disabled this early return leaves the
    // video clock (and m_rewind) untouched — confirm the condition is not
    // inverted, as it reads surprisingly.
    if (m_audio_disabled)
        return;

    m_video_clock = m_start_time;

    m_last_frame_delay = 0.040;
    m_last_frame_pts = m_start_time - m_last_frame_delay;
    m_frame_time = m_start_time;

    m_rewind = ! m_rewind;
}
// Record the pts at the end of the buffer just handed to the audio sink and
// restart the wall-clock timer measuring time elapsed since that point.
void FFmpegClocks::audioSetBufferEndPts(const double pts)
{
    ScopedLock lock(m_mutex);

    m_audio_buffer_end_pts = pts;
    m_audio_timer.setStartTick();
}

// Advance the buffer-end pts by `increment` seconds (used when no absolute
// pts is available for the buffer).
void FFmpegClocks::audioAdjustBufferEndPts(double increment)
{
    ScopedLock lock(m_mutex);

    m_audio_buffer_end_pts += increment;
    m_audio_timer.setStartTick();
}
void FFmpegClocks::audioSetDelay(const double delay)
{
m_audio_delay = delay;
}
// Mark audio as unavailable; affects the rewindVideo() early-return path.
void FFmpegClocks::audioDisable()
{
    ScopedLock lock(m_mutex);

    m_audio_disabled = true;
}
// Derive a usable pts for `frame` and advance the internal video clock by the
// frame's delay (time_base, extended for repeated fields).
// NOTE(review): m_video_clock is accessed without m_mutex here — presumably
// this is only ever called from the video decoding thread; confirm.
double FFmpegClocks::videoSynchClock(const AVFrame * const frame, const double time_base, double pts)
{
    if (pts != 0)
    {
        // If we have a PTS, set the video clock to it.
        m_video_clock = pts;
    }
    else
    {
        // Else, if we don't, use the video clock value.
        pts = m_video_clock;
    }

    // Update the video clock to take into account the frame delay
    double frame_delay = time_base;
    frame_delay += frame->repeat_pict * (frame_delay * 0.5);

    m_video_clock += frame_delay;

    return pts;
}
// Given the pts of the frame about to be displayed, return how long (seconds)
// the caller should wait before showing it. The nominal inter-frame delay is
// clamped against the audio clock to pull video back into sync.
double FFmpegClocks::videoRefreshSchedule(const double pts)
{
    ScopedLock lock(m_mutex);

    // DEBUG
    //std::cerr << "ftime / dpts / delay / audio_time / adelay: ";

    double delay = pts - m_last_frame_pts;

    //std::cerr << m_frame_time << "  /  ";
    //std::cerr << delay << "  /  ";

    // If incorrect delay, use previous one
    if (delay <= 0.0 || delay >= 1.0)
        delay = m_last_frame_delay;

    // Save for next time
    m_last_frame_delay = delay;
    m_last_frame_pts = pts;

    // Update the delay to synch to the audio stream

    // Ideally the frame time should be incremented after the actual delay is computed.
    // But because of the sound latency, it seems better to keep some latency in the video too.
    m_frame_time += delay;

    const double audio_time = getAudioTime();

    // Clamp the correction to [-0.5, +2.5] nominal delays; while only one of
    // the two streams has rewound (m_rewind) the clocks are inconsistent, so
    // reuse the last computed delay instead.
    const double actual_delay = (! m_rewind) ?
        clamp(m_frame_time - audio_time, -0.5*delay, 2.5*delay) :
        m_last_actual_delay; // when rewinding audio or video (but the other has yet to be), get the last used delay

    //m_frame_time += delay;

    // DEBUG
    //std::cerr << delay << "  /  ";
    //std::cerr << audio_time << "  /  ";
    //std::cerr << actual_delay << std::endl;

    m_last_actual_delay = actual_delay;

    return actual_delay;
}
// Media start time in seconds.
// NOTE(review): read without m_mutex — m_start_time only changes in reset().
double FFmpegClocks::getStartTime() const
{
    return m_start_time;
}

// Current audio playback position: pts at the end of the last submitted
// buffer, plus wall-clock time elapsed since, minus the sink latency.
// Caller must hold m_mutex (see videoRefreshSchedule()).
double FFmpegClocks::getAudioTime() const
{
    return m_audio_buffer_end_pts + m_audio_timer.time_s() - m_audio_delay;
}
} // namespace osgFFmpeg

View File

@@ -0,0 +1,69 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_CLOCKS_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_CLOCKS_H
#include <osg/Timer>
#include <OpenThreads/Mutex>
#include <OpenThreads/ScopedLock>
#include "FFmpegHeaders.hpp"
namespace osgFFmpeg {
/** Shared audio/video synchronisation clocks for the ffmpeg plugin.
  *
  * The audio clock is derived from the pts at the end of the last buffer
  * handed to the audio sink plus wall-clock time elapsed since; the video
  * thread uses videoRefreshSchedule() to compute how long to wait before
  * displaying the next frame. Shared state is guarded by m_mutex. */
class FFmpegClocks
{
public:

    FFmpegClocks();

    // Re-initialise all clocks to the media start time.
    void reset(double start_time);

    // Notify that the audio / video side has looped back to the start.
    void rewindAudio();
    void rewindVideo();

    // Audio-thread bookkeeping of playback progress and sink latency.
    void audioSetBufferEndPts(double pts);
    void audioAdjustBufferEndPts(double increment);
    void audioSetDelay(double delay);
    void audioDisable();

    // Video-thread: derive a frame pts / the delay before displaying it.
    double videoSynchClock(const AVFrame * frame, double time_base, double pts);
    double videoRefreshSchedule(double pts);

    double getStartTime() const;

private:

    // Current audio playback position; caller must hold m_mutex.
    double getAudioTime() const;

    typedef osg::Timer Timer;
    typedef OpenThreads::Mutex Mutex;
    typedef OpenThreads::ScopedLock<Mutex> ScopedLock;

    mutable Mutex m_mutex;             // guards the members below

    double m_video_clock;              // running pts of the video stream
    double m_start_time;               // media start time (seconds)
    double m_last_frame_delay;         // last inter-frame delay used
    double m_last_frame_pts;
    double m_last_actual_delay;
    double m_frame_time;               // scheduled presentation time of the current frame
    double m_audio_buffer_end_pts;     // pts at the end of the last queued audio buffer
    double m_audio_delay;              // audio sink output latency
    Timer m_audio_timer;               // wall clock since the last audio pts update
    bool m_audio_disabled;
    bool m_rewind;                     // true while only one of the two streams has rewound
};
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_CLOCKS_H

View File

@@ -0,0 +1,326 @@
#include "FFmpegDecoder.hpp"
#include <osg/Notify>
#include <cassert>
#include <limits>
#include <stdexcept>
#include <string.h>
namespace osgFFmpeg {
FFmpegDecoder::FFmpegDecoder() :
    m_audio_stream(0),
    m_video_stream(0),
    m_audio_queue(100),    // queue capacities are in packets, not bytes
    m_video_queue(100),
    m_audio_decoder(m_audio_queue, m_clocks),
    m_video_decoder(m_video_queue, m_clocks),
    m_state(NORMAL),
    m_loop(false)
{
}

FFmpegDecoder::~FFmpegDecoder()
{
    // Wait for the decoder threads to exit before members are destroyed.
    close(true);
}
/** Open `filename` and set up the audio/video decoders.
  *
  * Paths beginning with "/dev/" are treated as video4linux2 capture devices
  * (hard-coded 640x480 @ 50 Hz time base); anything else is demuxed as a
  * regular media file. Returns false (after logging a warning) on failure.
  * A missing or unopenable audio stream is not fatal: only audio is disabled. */
bool FFmpegDecoder::open(const std::string & filename)
{
    try
    {
        // Open video file
        AVFormatContext * p_format_context = 0;

        if (filename.compare(0, 5, "/dev/")==0)
        {
            avdevice_register_all();

            osg::notify(osg::NOTICE)<<"Attempting to stream "<<filename<<std::endl;

            AVFormatParameters formatParams;
            memset(&formatParams, 0, sizeof(AVFormatParameters));
            AVInputFormat *iformat;

            formatParams.channel = 0;
            formatParams.standard = 0;
            formatParams.width = 640;
            formatParams.height = 480;
            formatParams.time_base.num = 1;
            formatParams.time_base.den = 50;

            iformat = av_find_input_format("video4linux2");

            if (iformat)
            {
                osg::notify(osg::NOTICE)<<"Found input format"<<std::endl;
            }
            else
            {
                osg::notify(osg::NOTICE)<<"Failed to find input_format"<<std::endl;
            }

            if (av_open_input_file(&p_format_context, filename.c_str(), iformat, 0, &formatParams) != 0)
                throw std::runtime_error("av_open_input_file() failed");
        }
        else
        {
            if (av_open_input_file(&p_format_context, filename.c_str(), 0, 0, 0) !=0 )
                throw std::runtime_error("av_open_input_file() failed");
        }

        // From here on the context is owned (and eventually closed) by m_format_context.
        m_format_context.reset(p_format_context);

        // Retrieve stream info
        if (av_find_stream_info(p_format_context) < 0)
            throw std::runtime_error("av_find_stream_info() failed");

        // Cache duration and start time in seconds.
        m_duration = double(m_format_context->duration) / AV_TIME_BASE;
        m_start = double(m_format_context->start_time) / AV_TIME_BASE;

        // TODO move this elsewhere
        m_clocks.reset(m_start);

        // Dump info to stderr
        dump_format(p_format_context, 0, filename.c_str(), false);

        // Find and open the first video and audio streams (note that audio stream is optional and only opened if possible)
        findVideoStream();
        findAudioStream();

        m_video_decoder.open(m_video_stream);

        try
        {
            m_audio_decoder.open(m_audio_stream);
        }
        catch (const std::runtime_error & error)
        {
            // Audio failure is non-fatal: continue video-only.
            osg::notify(osg::WARN) << "FFmpegImageStream::open audio failed, audio stream will be disabled: " << error.what() << std::endl;
        }
    }
    catch (const std::runtime_error & error)
    {
        osg::notify(osg::WARN) << "FFmpegImageStream::open : " << error.what() << std::endl;
        return false;
    }

    return true;
}
// Stop decoding: flush any queued packets, then shut down the audio and video
// decoder threads, optionally waiting for them to exit.
void FFmpegDecoder::close(bool waitForThreadToExit)
{
    flushAudioQueue();
    flushVideoQueue();

    m_audio_decoder.close(waitForThreadToExit);
    m_video_decoder.close(waitForThreadToExit);
}
// Dispatch one demuxing step according to the current state machine phase.
// Returns true when a data packet was successfully handed on.
bool FFmpegDecoder::readNextPacket()
{
    if (m_state == NORMAL)
        return readNextPacketNormal();

    if (m_state == END_OF_STREAM)
        return readNextPacketEndOfStream();

    if (m_state == REWINDING)
        return readNextPacketRewinding();

    // Unreachable: every State value is handled above.
    assert(false);
    return false;
}
// Seek back to the start of the media: drop any packet held over from a
// timed-out push, empty both queues, then perform the actual seek.
void FFmpegDecoder::rewind()
{
    m_pending_packet.clear();

    flushAudioQueue();
    flushVideoQueue();
    rewindButDontFlushQueues();
}
void FFmpegDecoder::findAudioStream()
{
for (unsigned int i = 0; i < m_format_context->nb_streams; ++i)
{
if (m_format_context->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO)
{
m_audio_stream = m_format_context->streams[i];
m_audio_index = i;
return;
}
}
m_audio_stream = 0;
m_audio_index = std::numeric_limits<unsigned int>::max();
}
void FFmpegDecoder::findVideoStream()
{
for (unsigned int i = 0; i < m_format_context->nb_streams; ++i)
{
if (m_format_context->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
{
m_video_stream = m_format_context->streams[i];
m_video_index = i;
return;
}
}
throw std::runtime_error("could not find a video stream");
}
// Drop all queued audio packets, releasing each one's data via FFmpegPacketClear.
inline void FFmpegDecoder::flushAudioQueue()
{
    FFmpegPacketClear pc;
    m_audio_queue.flush(pc);
}

// Drop all queued video packets, releasing each one's data.
inline void FFmpegDecoder::flushVideoQueue()
{
    FFmpegPacketClear pc;
    m_video_queue.flush(pc);
}
// NORMAL-state demuxing: read one packet from the format context and try to
// hand it to the matching queue. Returns true when a data packet was
// dispatched (or dropped as belonging to an unused stream); returns false
// when the queue push timed out (the packet is kept in m_pending_packet and
// retried on the next call) or the end of the stream was reached.
bool FFmpegDecoder::readNextPacketNormal()
{
    AVPacket packet;

    if (! m_pending_packet)
    {
        bool end_of_stream = false;

        // Read the next frame packet
        if (av_read_frame(m_format_context.get(), &packet) < 0)
        {
            // Distinguish genuine end-of-stream from an I/O error.
            if (url_ferror(m_format_context->pb) == 0)
                end_of_stream = true;
            else
                throw std::runtime_error("av_read_frame() failed");
        }

        if (end_of_stream)
        {
            // If we reach the end of the stream, change the decoder state
            if (loop())
                rewindButDontFlushQueues();
            else
                m_state = END_OF_STREAM;

            return false;
        }
        else
        {
            // Make the packet data available beyond av_read_frame() logical scope.
            if (av_dup_packet(&packet) < 0)
                throw std::runtime_error("av_dup_packet() failed");

            m_pending_packet = FFmpegPacket(packet);
        }
    }

    // Send data packet
    if (m_pending_packet.type == FFmpegPacket::PACKET_DATA)
    {
        if (m_pending_packet.packet.stream_index == m_audio_index)
        {
            if (m_audio_queue.timedPush(m_pending_packet, 10)) {
                // Queue took ownership; stop tracking the packet.
                m_pending_packet.release();
                return true;
            }
        }
        else if (m_pending_packet.packet.stream_index == m_video_index)
        {
            if (m_video_queue.timedPush(m_pending_packet, 10)) {
                m_pending_packet.release();
                return true;
            }
        }
        else
        {
            // Packet belongs to a stream we don't decode: discard it.
            m_pending_packet.clear();
            return true;
        }
    }

    return false;
}
// END_OF_STREAM state: keep offering end-of-stream markers to both queues so
// the decoder threads can drain their queues and notice the end.
bool FFmpegDecoder::readNextPacketEndOfStream()
{
    const FFmpegPacket packet(FFmpegPacket::PACKET_END_OF_STREAM);

    m_audio_queue.timedPush(packet, 10);
    m_video_queue.timedPush(packet, 10);

    return false;
}

// REWINDING state: a FLUSH marker must reach BOTH queues before normal
// demuxing resumes. NOTE(review): if the audio push succeeds but the video
// push times out, short-circuit evaluation retries both next call, so the
// audio queue can receive more than one FLUSH packet.
bool FFmpegDecoder::readNextPacketRewinding()
{
    const FFmpegPacket packet(FFmpegPacket::PACKET_FLUSH);

    if (m_audio_queue.timedPush(packet, 10) && m_video_queue.timedPush(packet, 10))
        m_state = NORMAL;

    return false;
}
// Seek the demuxer back to the recorded start time (rescaled into the video
// stream's time base) and enter REWINDING state. The queues keep whatever
// they already hold; readNextPacketRewinding() hands them FLUSH markers.
void FFmpegDecoder::rewindButDontFlushQueues()
{
    const AVRational AvTimeBaseQ = { 1, AV_TIME_BASE }; // = AV_TIME_BASE_Q

    const int64_t pos = m_clocks.getStartTime() * AV_TIME_BASE;
    const int64_t seek_target = av_rescale_q(pos, AvTimeBaseQ, m_video_stream->time_base);

    if (av_seek_frame(m_format_context.get(), m_video_index, seek_target, 0/*AVSEEK_FLAG_BYTE |*/ /*AVSEEK_FLAG_BACKWARD*/) < 0)
        throw std::runtime_error("av_seek_frame failed()");

    m_state = REWINDING;
}
} // namespace osgFFmpeg

View File

@@ -0,0 +1,175 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_H
#include "FFmpegDecoderAudio.hpp"
#include "FFmpegDecoderVideo.hpp"
#include <osg/Notify>
namespace osgFFmpeg {
/** Owning smart pointer for an AVFormatContext: the context is closed with
  * av_close_input_file() on destruction or reset(). Non-copyable, since two
  * owners would close the same context twice. */
class FormatContextPtr
{
    public:
    
        typedef AVFormatContext T;
    
        explicit FormatContextPtr() : _ptr(0) {}
        explicit FormatContextPtr(T* ptr) : _ptr(ptr) {}
        
        ~FormatContextPtr()
        {
            cleanup();
        }
        
        T* get() { return _ptr; }

        T * operator-> () const // never throws
        {
            return _ptr;
        }

        /** Take ownership of `ptr`, closing any previously held context.
          * Resetting to the already-held pointer is a no-op. */
        void reset(T* ptr) 
        {
            if (ptr==_ptr) return;
            cleanup();
            _ptr = ptr;
        }

        /** Close the held context (if any) and null the pointer. */
        void cleanup()
        {
            if (_ptr) 
            {
                osg::notify(osg::NOTICE)<<"Calling av_close_input_file("<<_ptr<<")"<<std::endl;
                av_close_input_file(_ptr);
            }
            _ptr = 0;
        }
        
    private:

        // Not implemented: copying an owning pointer would cause a double
        // close of the same AVFormatContext (pre-C++11 non-copyable idiom).
        FormatContextPtr(const FormatContextPtr &);
        FormatContextPtr & operator = (const FormatContextPtr &);

    protected:

        T* _ptr;
};
/** Demuxer and owner of the audio/video decoder threads.
  *
  * open() locates and opens the streams; readNextPacket() pulls packets from
  * the format context and dispatches them to the audio/video packet queues.
  * A small state machine (NORMAL / END_OF_STREAM / REWINDING) handles looping
  * and end-of-stream. */
class FFmpegDecoder : public osg::Referenced
{
public:

    FFmpegDecoder();
    ~FFmpegDecoder();

    bool open(const std::string & filename);
    void close(bool waitForThreadToExit);

    // Demux one packet; true when a data packet was dispatched.
    bool readNextPacket();
    void rewind();

    // Enable/disable looping playback (checked at end of stream).
    void loop(bool loop);
    bool loop() const;

    double duration() const;

    FFmpegDecoderAudio & audio_decoder();
    FFmpegDecoderVideo & video_decoder();
    FFmpegDecoderAudio const & audio_decoder() const;
    FFmpegDecoderVideo const & video_decoder() const;

protected:

    enum State
    {
        NORMAL,          // regular demuxing
        END_OF_STREAM,   // feeding end-of-stream markers to the queues
        REWINDING        // waiting until both queues accepted a flush marker
    };

    typedef BoundedMessageQueue<FFmpegPacket> PacketQueue;

    void findAudioStream();
    void findVideoStream();
    void flushAudioQueue();
    void flushVideoQueue();
    bool readNextPacketNormal();
    bool readNextPacketEndOfStream();
    bool readNextPacketRewinding();
    void rewindButDontFlushQueues();

    FormatContextPtr m_format_context;   // owning handle on the demuxer context
    AVStream * m_audio_stream;           // 0 when the media has no audio stream
    AVStream * m_video_stream;
    unsigned int m_audio_index;          // stream indices (audio: UINT_MAX when absent)
    unsigned int m_video_index;

    FFmpegClocks m_clocks;
    FFmpegPacket m_pending_packet;       // packet whose queue push timed out; retried next call
    PacketQueue m_audio_queue;
    PacketQueue m_video_queue;

    FFmpegDecoderAudio m_audio_decoder;
    FFmpegDecoderVideo m_video_decoder;

    double m_duration;   // cached media duration in seconds (set in open())
    double m_start;      // cached media start time in seconds (set in open())

    State m_state;
    bool m_loop;
};
// Enable/disable looping playback.
inline void FFmpegDecoder::loop(const bool loop)
{
m_loop = loop;
}
inline bool FFmpegDecoder::loop() const
{
return m_loop;
}
// Stream duration in seconds.
// NOTE(review): dereferences m_format_context without a null check —
// presumably only called after a successful open(); verify against callers.
inline double FFmpegDecoder::duration() const
{
return double(m_format_context->duration) / AV_TIME_BASE;
}
inline FFmpegDecoderAudio & FFmpegDecoder::audio_decoder()
{
return m_audio_decoder;
}
inline FFmpegDecoderVideo & FFmpegDecoder::video_decoder()
{
return m_video_decoder;
}
inline FFmpegDecoderAudio const & FFmpegDecoder::audio_decoder() const
{
return m_audio_decoder;
}
inline FFmpegDecoderVideo const & FFmpegDecoder::video_decoder() const
{
return m_video_decoder;
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_H

View File

@@ -0,0 +1,318 @@
#include "FFmpegDecoderAudio.hpp"
#include <osg/Notify>
#include <stdexcept>
#include <string.h>
//DEBUG
//#include <iostream>
namespace osgFFmpeg {
/// Constructs the audio decoder thread object.
/// Fix: m_frequency, m_nb_channels and m_sample_format were previously left
/// uninitialized until open(); the inline accessors (frequency(), etc.)
/// could read indeterminate values when no audio stream is opened.
FFmpegDecoderAudio::FFmpegDecoderAudio(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_audio_buffer((AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2),
    m_audio_buf_size(0),
    m_audio_buf_index(0),
    m_frequency(0),
    m_nb_channels(0),
    m_sample_format(osg::AudioStream::SAMPLE_FORMAT_U8),
    m_end_of_stream(false),
    m_exit(false)
{
}
// Destructor: requests the decode thread to stop and joins it before
// the members it uses are destroyed.
FFmpegDecoderAudio::~FFmpegDecoderAudio()
{
if (isRunning())
{
m_exit = true;
#if 0
while(isRunning()) { OpenThreads::YieldCurrentThread(); }
#else
// Block until the decode thread has fully exited.
join();
#endif
}
}
/// Binds the decoder to the given audio stream and opens its codec.
/// @param stream  audio stream to decode; null means "no audio" and is not an error.
/// @throws std::runtime_error on an invalid codec or codec-open failure;
///         m_context is reset to null so validContext() stays false.
/// Fix: removed a stray duplicated semicolon after the "invalid audio codec" throw.
void FFmpegDecoderAudio::open(AVStream * const stream)
{
    try
    {
        // Sound can be optional (i.e. no audio stream is present)
        if (stream == 0)
            return;

        m_stream = stream;
        m_context = stream->codec;

        // Cache the stream properties needed by the audio sink.
        m_frequency = m_context->sample_rate;
        m_nb_channels = m_context->channels;
        m_sample_format = osg::AudioStream::SampleFormat(m_context->sample_fmt);

        // Check stream sanity
        if (m_context->codec_id == CODEC_ID_NONE)
            throw std::runtime_error("invalid audio codec");

        // Find the decoder for the audio stream
        AVCodec * const p_codec = avcodec_find_decoder(m_context->codec_id);

        if (p_codec == 0)
            throw std::runtime_error("avcodec_find_decoder() failed");

        // Inform the codec that we can handle truncated bitstreams
        //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
        //    m_context->flags |= CODEC_FLAG_TRUNCATED;

        // Open codec
        if (avcodec_open(m_context, p_codec) < 0)
            throw std::runtime_error("avcodec_open() failed");
    }
    catch (...)
    {
        // Leave the object in the "no audio" state on any failure.
        m_context = 0;
        throw;
    }
}
/// Signals the decode thread to stop; optionally spins until it has exited.
void FFmpegDecoderAudio::close(bool waitForThreadToExit)
{
    // Ask the decode loop to terminate at its next iteration.
    m_exit = true;

    if (waitForThreadToExit)
    {
        // Busy-wait (yielding) until the thread has actually finished.
        while (isRunning())
        {
            OpenThreads::Thread::YieldCurrentThread();
        }
    }
}
// Thread entry point: runs the decode loop and reports (but does not
// propagate) any exception so the thread never terminates the process.
void FFmpegDecoderAudio::run()
{
try
{
decodeLoop();
}
catch (const std::exception & error)
{
osg::notify(osg::WARN) << "FFmpegDecoderAudio::run : " << error.what() << std::endl;
}
catch (...)
{
osg::notify(osg::WARN) << "FFmpegDecoderAudio::run : unhandled exception" << std::endl;
}
}
// Install the sink that will consume decoded audio via fillBuffer().
void FFmpegDecoderAudio::setAudioSink(osg::ref_ptr<osg::AudioStream::AudioSink> audio_sink)
{
// The FFmpegDecoderAudio object takes the responsibility of destroying the audio_sink.
osg::notify(osg::NOTICE)<<"Assigning "<<audio_sink<<std::endl;
m_audio_sink = audio_sink;
}
// Copies exactly 'size' bytes of decoded audio into 'buffer', refilling the
// internal pre-fetch buffer from decodeFrame() as needed. When no data can be
// decoded, silence (zeros) is emitted so the sink never starves.
void FFmpegDecoderAudio::fillBuffer(void * const buffer, size_t size)
{
size_t filled = 0;
uint8_t * dst_buffer = reinterpret_cast<uint8_t*>(buffer);
while (size != 0)
{
// Refill the pre-fetch buffer once it has been fully consumed.
if (m_audio_buf_index == m_audio_buf_size)
{
m_audio_buf_index = 0;
// Pre-fetch audio buffer is empty, refill it.
const size_t bytes_decoded = decodeFrame(&m_audio_buffer[0], m_audio_buffer.size());
// If nothing could be decoded (e.g. error or no packet available), output a bit of silence
if (bytes_decoded == 0)
{
m_audio_buf_size = std::min(Buffer::size_type(1024), m_audio_buffer.size());
memset(&m_audio_buffer[0], 0, m_audio_buf_size);
}
else
{
m_audio_buf_size = bytes_decoded;
}
}
// Copy as much as is available (or still requested) from the pre-fetch buffer.
const size_t fill_size = std::min(m_audio_buf_size - m_audio_buf_index, size);
memcpy(dst_buffer, &m_audio_buffer[m_audio_buf_index], fill_size);
size -= fill_size;
dst_buffer += fill_size;
m_audio_buf_index += fill_size;
// Keep the audio clock in sync with the amount of data handed out.
adjustBufferEndTps(fill_size);
}
}
// Main loop of the audio thread. If there is a valid audio context and sink,
// playback is started and the sink pulls data via fillBuffer(); otherwise the
// loop merely drains the packet queue so the demultiplexer never blocks.
void FFmpegDecoderAudio::decodeLoop()
{
const bool skip_audio = ! validContext() || ! m_audio_sink.valid();
if (! skip_audio && ! m_audio_sink->playing())
{
m_clocks.audioSetDelay(m_audio_sink->getDelay());
m_audio_sink->startPlaying();
}
else
{
// No usable audio: let the video clock run free.
m_clocks.audioDisable();
}
while (! m_exit)
{
// If skipping audio, make sure the audio stream is still consumed.
if (skip_audio)
{
bool is_empty;
FFmpegPacket packet = m_packets.timedPop(is_empty, 10);
if (packet.valid())
packet.clear();
}
// Else, just idle in this thread.
// Note: If m_audio_sink has an audio callback, this thread will still be woken
// from time to time to refill the audio buffer.
else
{
OpenThreads::Thread::microSleep(10000);
}
}
}
/// Advances the audio clock by the playback duration of 'buffer_size' bytes.
/// @throws std::runtime_error for sample formats with an unknown byte width.
void FFmpegDecoderAudio::adjustBufferEndTps(const size_t buffer_size)
{
    // Determine the byte width of a single sample for the current format.
    int bytes_per_sample;

    switch (sampleFormat())
    {
    case osg::AudioStream::SAMPLE_FORMAT_U8:
        bytes_per_sample = 1;
        break;
    case osg::AudioStream::SAMPLE_FORMAT_S16:
        bytes_per_sample = 2;
        break;
    case osg::AudioStream::SAMPLE_FORMAT_S24:
        bytes_per_sample = 3;
        break;
    case osg::AudioStream::SAMPLE_FORMAT_S32:
        bytes_per_sample = 4;
        break;
    case osg::AudioStream::SAMPLE_FORMAT_F32:
        bytes_per_sample = 4;
        break;
    default:
        throw std::runtime_error("unsupported audio sample format");
    }

    // Bytes consumed per second of playback across all channels.
    const int bytes_per_second = nbChannels() * frequency() * bytes_per_sample;

    m_clocks.audioAdjustBufferEndPts(double(buffer_size) / double(bytes_per_second));
}
// Decodes audio into 'buffer' (capacity 'size' bytes). Returns the number of
// bytes produced, or 0 when no packet is available or the thread is exiting.
// Keeps per-packet decode state (m_packet_data / m_bytes_remaining) across
// calls so a single packet can yield several frames.
size_t FFmpegDecoderAudio::decodeFrame(void * const buffer, const size_t size)
{
for (;;)
{
// Decode current packet
while (m_bytes_remaining > 0)
{
int data_size = size;
const int bytes_decoded = avcodec_decode_audio2(m_context, reinterpret_cast<int16_t*>(buffer), &data_size, m_packet_data, m_bytes_remaining);
if (bytes_decoded < 0)
{
// if error, skip frame
m_bytes_remaining = 0;
break;
}
m_bytes_remaining -= bytes_decoded;
m_packet_data += bytes_decoded;
// If we have some data, return it and come back for more later.
if (data_size > 0)
return data_size;
}
// Get next packet
if (m_packet.valid())
m_packet.clear();
if (m_exit)
return 0;
bool is_empty = true;
m_packet = m_packets.tryPop(is_empty);
if (is_empty)
return 0;
if (m_packet.type == FFmpegPacket::PACKET_DATA)
{
// Track the PTS of the packet so the audio clock can follow the stream.
if (m_packet.packet.pts != AV_NOPTS_VALUE)
{
const double pts = av_q2d(m_stream->time_base) * m_packet.packet.pts;
m_clocks.audioSetBufferEndPts(pts);
}
m_bytes_remaining = m_packet.packet.size;
m_packet_data = m_packet.packet.data;
}
else if (m_packet.type == FFmpegPacket::PACKET_END_OF_STREAM)
{
m_end_of_stream = true;
}
else if (m_packet.type == FFmpegPacket::PACKET_FLUSH)
{
// Drop any codec-internal buffers and rewind the audio clock.
// NOTE(review): m_end_of_stream is not reset here — after a rewind past
// end-of-stream this decoder keeps emitting silence; confirm intended.
avcodec_flush_buffers(m_context);
m_clocks.rewindAudio();
}
// just output silence when we reached the end of stream
if (m_end_of_stream)
{
memset(buffer, 0, size);
return size;
}
}
}
} // namespace osgFFmpeg

View File

@@ -0,0 +1,109 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_AUDIO_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_AUDIO_H
#include <OpenThreads/Thread>
#include "FFmpegClocks.hpp"
#include "FFmpegPacket.hpp"
#include <osg/AudioStream>
#include "BoundedMessageQueue.hpp"
namespace osgFFmpeg {
// Audio decoder running on its own thread; pulls packets from a shared queue
// and hands decoded samples to an osg::AudioSink via fillBuffer().
class FFmpegDecoderAudio : public OpenThreads::Thread
{
public:
typedef BoundedMessageQueue<FFmpegPacket> PacketQueue;
typedef void (* PublishFunc) (const FFmpegDecoderAudio & decoder, void * user_data);
FFmpegDecoderAudio(PacketQueue & packets, FFmpegClocks & clocks);
~FFmpegDecoderAudio();
// Bind to an audio stream and open its codec; null stream means "no audio".
void open(AVStream * stream);
void close(bool waitForThreadToExit);
virtual void run();
void setAudioSink(osg::ref_ptr<osg::AudioSink> audio_sink);
// Fill 'buffer' with exactly 'size' bytes of decoded audio (silence-padded).
void fillBuffer(void * buffer, size_t size);
// True once open() has succeeded on a real audio stream.
bool validContext() const;
int frequency() const;
int nbChannels() const;
osg::AudioStream::SampleFormat sampleFormat() const;
private:
typedef osg::ref_ptr<osg::AudioSink> SinkPtr;
typedef std::vector<uint8_t> Buffer;
void decodeLoop();
// Advance the audio clock by the playback time of 'buffer_size' bytes.
void adjustBufferEndTps(size_t buffer_size);
size_t decodeFrame(void * buffer, size_t size);
PacketQueue & m_packets;           // shared with the demultiplexer
FFmpegClocks & m_clocks;           // shared A/V synchronization clocks
AVStream * m_stream;               // non-owning
AVCodecContext * m_context;        // non-owning; null => no audio
FFmpegPacket m_packet;             // packet currently being decoded
const uint8_t * m_packet_data;     // read cursor into m_packet
int m_bytes_remaining;             // bytes left in m_packet
Buffer m_audio_buffer;             // pre-fetch buffer of decoded samples
size_t m_audio_buf_size;           // valid bytes in m_audio_buffer
size_t m_audio_buf_index;          // consumed bytes in m_audio_buffer
int m_frequency;                   // sample rate (Hz)
int m_nb_channels;
osg::AudioStream::SampleFormat m_sample_format;
SinkPtr m_audio_sink;
bool m_end_of_stream;
volatile bool m_exit;              // set by other threads to stop the loop
};
inline bool FFmpegDecoderAudio::validContext() const
{
return m_context != 0;
}
inline int FFmpegDecoderAudio::frequency() const
{
return m_frequency;
}
inline int FFmpegDecoderAudio::nbChannels() const
{
return m_nb_channels;
}
inline osg::AudioStream::SampleFormat FFmpegDecoderAudio::sampleFormat() const
{
return m_sample_format;
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_AUDIO_H

View File

@@ -0,0 +1,362 @@
#include "FFmpegDecoderVideo.hpp"
#include <osg/Notify>
#include <osg/Timer>
#include <stdexcept>
#include <string.h>
namespace osgFFmpeg {
/// Constructs the video decoder thread object.
/// Fix: m_frame_rate, m_aspect_ratio, m_width, m_height, m_next_frame_index
/// and m_alpha_channel were previously left uninitialized until open(); the
/// inline accessors (width(), frameRate(), ...) could read indeterminate
/// values on an unopened decoder.
FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_writeBuffer(0),
    m_user_data(0),
    m_publish_func(0),
    m_frame_rate(0.0),
    m_aspect_ratio(0.0),
    m_width(0),
    m_height(0),
    m_next_frame_index(0),
    m_alpha_channel(false),
    m_exit(false)
#ifdef USE_SWSCALE
    ,m_swscale_ctx(0)
#endif
{
}
// Destructor: joins the decode thread, then frees the swscale context
// (the thread must be stopped first since it uses that context).
FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
osg::notify(osg::NOTICE)<<"Destructing FFmpegDecoderVideo..."<<std::endl;
if (isRunning())
{
m_exit = true;
#if 0
while(isRunning()) { OpenThreads::YieldCurrentThread(); }
#else
// Block until the decode thread has fully exited.
join();
#endif
}
#ifdef USE_SWSCALE
if (m_swscale_ctx)
{
sws_freeContext(m_swscale_ctx);
m_swscale_ctx = 0;
}
#endif
osg::notify(osg::NOTICE)<<"Destructed FFmpegDecoderVideo"<<std::endl;
}
// Binds the decoder to a video stream, opens its codec and allocates the
// source frame plus a double-buffered RGBA destination frame.
// Throws std::runtime_error when no decoder is found or the codec fails to open.
void FFmpegDecoderVideo::open(AVStream * const stream)
{
m_stream = stream;
m_context = stream->codec;
// Trust the video size given at this point
// (avcodec_open seems to sometimes return a 0x0 size)
m_width = m_context->width;
m_height = m_context->height;
findAspectRatio();
// Find out whether we support Alpha channel
m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);
// Find out the framerate
m_frame_rate = av_q2d(stream->r_frame_rate);
// Find the decoder for the video stream
m_codec = avcodec_find_decoder(m_context->codec_id);
if (m_codec == 0)
throw std::runtime_error("avcodec_find_decoder() failed");
// Inform the codec that we can handle truncated bitstreams
//if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
//    m_context->flags |= CODEC_FLAG_TRUNCATED;
// Open codec
if (avcodec_open(m_context, m_codec) < 0)
throw std::runtime_error("avcodec_open() failed");
// Allocate video frame
m_frame.reset(avcodec_alloc_frame());
// Allocate converted RGB frame
m_frame_rgba.reset(avcodec_alloc_frame());
// Two buffers: one being written by the decoder, one being read by consumers.
m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
m_buffer_rgba[1].resize(m_buffer_rgba[0].size());
// Assign appropriate parts of the buffer to image planes in m_frame_rgba
avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height());
// Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
m_context->opaque = this;
m_context->get_buffer = getBuffer;
m_context->release_buffer = releaseBuffer;
}
/// Signals the decode thread to stop; optionally spins until it has exited.
void FFmpegDecoderVideo::close(bool waitForThreadToExit)
{
    // Ask the decode loop to terminate at its next iteration.
    m_exit = true;

    if (waitForThreadToExit)
    {
        // Busy-wait (yielding) until the thread has actually finished.
        while (isRunning())
        {
            OpenThreads::Thread::YieldCurrentThread();
        }
    }
}
// Thread entry point: runs the decode loop and reports (but does not
// propagate) any exception so the thread never terminates the process.
void FFmpegDecoderVideo::run()
{
try
{
decodeLoop();
}
catch (const std::exception & error)
{
osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
}
catch (...)
{
osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
}
}
// Main loop of the video thread: pulls packets from the queue, decodes them
// into frames, derives each frame's PTS and publishes completed frames after
// synchronizing against the shared clocks.
void FFmpegDecoderVideo::decodeLoop()
{
FFmpegPacket packet;
double pts;
while (! m_exit)
{
// Work on the current packet until we have decoded all of it
while (m_bytes_remaining > 0)
{
// Save global PTS to be stored in m_frame via getBuffer()
m_packet_pts = packet.packet.pts;
// Decode video frame
int frame_finished = 0;
const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);
if (bytes_decoded < 0)
throw std::runtime_error("avcodec_decode_video failed()");
m_bytes_remaining -= bytes_decoded;
m_packet_data += bytes_decoded;
// Find out the frame pts
// Prefer the PTS captured by getBuffer(); fall back to the packet DTS, then 0.
if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
{
pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
}
else if (packet.packet.dts != AV_NOPTS_VALUE)
{
pts = packet.packet.dts;
}
else
{
pts = 0;
}
// Convert from stream time base units to seconds.
pts *= av_q2d(m_stream->time_base);
// Publish the frame if we have decoded a complete frame
if (frame_finished)
{
const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);
publishFrame(frame_delay);
}
}
// Get the next packet
pts = 0;
if (packet.valid())
packet.clear();
bool is_empty = true;
packet = m_packets.timedPop(is_empty, 10);
if (! is_empty)
{
if (packet.type == FFmpegPacket::PACKET_DATA)
{
m_bytes_remaining = packet.packet.size;
m_packet_data = packet.packet.data;
}
else if (packet.type == FFmpegPacket::PACKET_FLUSH)
{
// Drop codec-internal buffers and rewind the video clock.
avcodec_flush_buffers(m_context);
m_clocks.rewindVideo();
}
}
}
}
/// Computes m_aspect_ratio from the stream's declared pixel aspect ratio,
/// falling back to the plain width/height ratio when none is declared.
void FFmpegDecoderVideo::findAspectRatio()
{
    double computed = 0.0;

    // Use the codec's sample (pixel) aspect ratio when one is present.
    if (m_context->sample_aspect_ratio.num != 0)
        computed = (av_q2d(m_context->sample_aspect_ratio) * m_width) / m_height;

    // Fall back to the raw image dimensions.
    if (computed <= 0.0)
        computed = double(m_width) / double(m_height);

    m_aspect_ratio = computed;
}
// Converts a picture between pixel formats, using libswscale when available
// and the legacy img_convert() otherwise. Timing of the conversion is logged.
// Returns the underlying conversion function's result code.
int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, const AVPicture *src,
int src_pix_fmt, int src_width, int src_height)
{
osg::Timer_t startTick = osg::Timer::instance()->tick();
#ifdef USE_SWSCALE
// Lazily create the swscale context; it is reused for every frame
// (same geometry/formats for the whole stream) and freed in the destructor.
if (m_swscale_ctx==0)
{
m_swscale_ctx = sws_getContext(src_width, src_height, src_pix_fmt,
src_width, src_height, dst_pix_fmt,
/*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
}
osg::notify(osg::NOTICE)<<"Using sws_scale ";
int result = sws_scale(m_swscale_ctx,
src->data, src->linesize, 0, src_height,
dst->data, dst->linesize);
#else
osg::notify(osg::NOTICE)<<"Using img_convert ";
int result = img_convert(dst, dst_pix_fmt, src,
src_pix_fmt, src_width, src_height);
#endif
osg::Timer_t endTick = osg::Timer::instance()->tick();
osg::notify(osg::NOTICE)<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;
return result;
}
// Converts the decoded frame to RGBA into the current write buffer, waits
// until the frame's scheduled display time, swaps the double buffer and
// notifies the consumer through the publish callback.
// 'delay' is the time in seconds until the frame should be displayed.
void FFmpegDecoderVideo::publishFrame(const double delay)
{
// If no publishing function, just ignore the frame
if (m_publish_func == 0)
return;
// If the display delay is too small, we better skip the frame.
if (delay < -0.010)
return;
const AVPicture * const src = (const AVPicture *) m_frame.get();
AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
// Assign appropriate parts of the buffer to image planes in m_frame_rgba
avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height());
// Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
if (m_context->pix_fmt == PIX_FMT_YUVA420P)
yuva420pToRgba(dst, src, width(), height());
else
convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());
// Wait 'delay' seconds before publishing the picture.
int i_delay = static_cast<int>(delay * 1000000 + 0.5);
while (i_delay > 1000)
{
// Avoid infinite/very long loops
if (m_exit)
return;
// Sleep in slices of at most one second so m_exit is re-checked regularly.
const int micro_delay = (std::min)(1000000, i_delay);
OpenThreads::Thread::microSleep(micro_delay);
i_delay -= micro_delay;
}
// Flip the double buffer: consumers now read the buffer just written.
m_writeBuffer = 1-m_writeBuffer;
m_publish_func(* this, m_user_data);
}
/// Converts a YUVA420P picture to RGBA: the YUV planes are converted with
/// the generic convert() routine, then the alpha plane (plane 3) is copied
/// into the alpha byte of every output pixel.
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, const AVPicture * const src, int width, int height)
{
    // Convert the color planes first; this leaves the alpha byte undefined.
    convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bytes_per_pixel = 4;

    // Point at the alpha byte of the first output pixel.
    uint8_t * alpha_dst = dst->data[0] + 3;

    for (int row = 0; row < height; ++row)
    {
        const uint8_t * alpha_src = src->data[3] + row * src->linesize[3];

        for (int col = 0; col < width; ++col)
        {
            // Copy one alpha sample per pixel.
            *alpha_dst = *alpha_src;
            alpha_dst += bytes_per_pixel;
            ++alpha_src;
        }
    }
}
// Codec callback replacing avcodec_default_get_buffer(): attaches the PTS of
// the packet being decoded (m_packet_pts) to the frame via picture->opaque,
// so decodeLoop() can recover a per-frame PTS even when packets are reordered.
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);
const int result = avcodec_default_get_buffer(context, picture);
// Allocated here, freed in releaseBuffer().
int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );
*p_pts = this_->m_packet_pts;
picture->opaque = p_pts;
return result;
}
// Codec callback: frees the PTS attached by getBuffer() before releasing the frame.
void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
if (picture != 0)
av_freep(&picture->opaque);
avcodec_default_release_buffer(context, picture);
}
} // namespace osgFFmpeg

View File

@@ -0,0 +1,187 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_VIDEO_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_VIDEO_H
#include "FFmpegHeaders.hpp"
#include "BoundedMessageQueue.hpp"
#include "FFmpegClocks.hpp"
#include "FFmpegPacket.hpp"
#include <OpenThreads/Thread>
#include <vector>
namespace osgFFmpeg {
/// RAII wrapper owning an AVFrame (from avcodec_alloc_frame()); the frame is
/// released with av_free() on destruction or reset().
/// Made non-copyable: copying would let two owners both free the same frame
/// (double free / crash).
class FramePtr
{
public:

    typedef AVFrame T;

    explicit FramePtr() : _ptr(0) {}
    explicit FramePtr(T* ptr) : _ptr(ptr) {}

    ~FramePtr()
    {
        cleanup();
    }

    /// Raw access to the owned frame (may be null).
    T* get() { return _ptr; }

    T * operator-> () const // never throws
    {
        return _ptr;
    }

    /// Take ownership of 'ptr', freeing any previously owned frame first.
    void reset(T* ptr)
    {
        if (ptr==_ptr) return;
        cleanup();
        _ptr = ptr;
    }

    /// Free and forget the owned frame; safe to call when null.
    void cleanup()
    {
        if (_ptr) av_free(_ptr);
        _ptr = 0;
    }

protected:

    T* _ptr;

private:

    // Disallow copying: two owners would both call av_free().
    FramePtr(const FramePtr &);
    FramePtr & operator = (const FramePtr &);
};
// Video decoder running on its own thread; pulls packets from a shared queue,
// decodes them into a double-buffered RGBA image and notifies a consumer
// through a publish callback.
class FFmpegDecoderVideo : public OpenThreads::Thread
{
public:
typedef BoundedMessageQueue<FFmpegPacket> PacketQueue;
typedef void (* PublishFunc) (const FFmpegDecoderVideo & decoder, void * user_data);
FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks);
~FFmpegDecoderVideo();
// Bind to a video stream and open its codec; throws on failure.
void open(AVStream * stream);
void close(bool waitForThreadToExit);
virtual void run();
// Opaque pointer passed back to the publish callback.
void setUserData(void * user_data);
void setPublishCallback(PublishFunc function);
int width() const;
int height() const;
double aspectRatio() const;
bool alphaChannel() const;
double frameRate() const;
// Read-side buffer of the RGBA double buffer (the last published frame).
const uint8_t * image() const;
private:
typedef std::vector<uint8_t> Buffer;
void decodeLoop();
void findAspectRatio();
void publishFrame(double delay);
double synchronizeVideo(double pts);
void yuva420pToRgba(AVPicture *dst, const AVPicture *src, int width, int height);
int convert(AVPicture *dst, int dst_pix_fmt, const AVPicture *src,
int src_pix_fmt, int src_width, int src_height);
// Codec callbacks used to tag each frame with the PTS of its packet.
static int getBuffer(AVCodecContext * context, AVFrame * picture);
static void releaseBuffer(AVCodecContext * context, AVFrame * picture);
PacketQueue & m_packets;            // shared with the demultiplexer
FFmpegClocks & m_clocks;            // shared A/V synchronization clocks
AVStream * m_stream;                // non-owning
AVCodecContext * m_context;         // non-owning
AVCodec * m_codec;                  // non-owning
const uint8_t * m_packet_data;      // read cursor into the current packet
int m_bytes_remaining;              // bytes left in the current packet
int64_t m_packet_pts;               // PTS of the packet being decoded
FramePtr m_frame;                   // decoded frame (codec pixel format)
FramePtr m_frame_rgba;              // frame mapped onto the RGBA write buffer
Buffer m_buffer_rgba[2];            // RGBA double buffer
int m_writeBuffer;                  // index of the buffer being written
void * m_user_data;                 // forwarded to m_publish_func
PublishFunc m_publish_func;
double m_frame_rate;
double m_aspect_ratio;
int m_width;
int m_height;
size_t m_next_frame_index;
bool m_alpha_channel;               // true for YUVA420P sources
volatile bool m_exit;               // set by other threads to stop the loop
#ifdef USE_SWSCALE
struct SwsContext * m_swscale_ctx;  // lazily created in convert()
#endif
};
inline void FFmpegDecoderVideo::setUserData(void * const user_data)
{
m_user_data = user_data;
}
inline void FFmpegDecoderVideo::setPublishCallback(const PublishFunc function)
{
m_publish_func = function;
}
inline int FFmpegDecoderVideo::width() const
{
return m_width;
}
inline int FFmpegDecoderVideo::height() const
{
return m_height;
}
inline double FFmpegDecoderVideo::aspectRatio() const
{
return m_aspect_ratio;
}
inline bool FFmpegDecoderVideo::alphaChannel() const
{
return m_alpha_channel;
}
inline double FFmpegDecoderVideo::frameRate() const
{
return m_frame_rate;
}
// Returns the buffer NOT currently being written, i.e. the last published frame.
inline const uint8_t * FFmpegDecoderVideo::image() const
{
return &((m_buffer_rgba[1-m_writeBuffer])[0]);
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_VIDEO_H

View File

@@ -0,0 +1,22 @@
#ifndef HEADER_GUARD_FFMPEG_HEADERS_H
#define HEADER_GUARD_FFMPEG_HEADERS_H
// FFmpeg is a C library: wrap its headers in extern "C" so C++ name
// mangling is not applied to its symbols.
extern "C"
{
// The FFmpeg headers use the INT64_C/UINT64_C macros; in C++ these are only
// exposed by <stdint.h> when __STDC_CONSTANT_MACROS is defined first.
// NOTE(review): this has no effect if <stdint.h> was already included by an
// earlier header in the translation unit — confirm include order.
#define __STDC_CONSTANT_MACROS
#include <stdint.h>
#include <avcodec.h>
#include <avformat.h>
#include <avdevice.h>
#ifdef USE_SWSCALE
#include <swscale.h>
#endif
}
#endif // HEADER_GUARD_FFMPEG_HEADERS_H

View File

@@ -0,0 +1,306 @@
#include "FFmpegImageStream.hpp"
#include "FFmpegAudioStream.hpp"
#include <OpenThreads/ScopedLock>
#include <osg/Notify>
#include <memory>
namespace osgFFmpeg {
// Default constructor: allocates the decoder and the command queue.
// auto_ptr holders keep the construction exception-safe: if the second
// allocation throws, the first one is still released.
FFmpegImageStream::FFmpegImageStream() :
m_decoder(0),
m_commands(0),
m_frame_published_flag(false)
{
setOrigin(osg::Image::BOTTOM_LEFT);
std::auto_ptr<FFmpegDecoder> decoder(new FFmpegDecoder);
std::auto_ptr<CommandQueue> commands(new CommandQueue);
m_decoder = decoder.release();
m_commands = commands.release();
}
/// Copy constructor required by META_Object.
/// Fix: m_commands and m_frame_published_flag were previously left
/// uninitialized — the destructor then executed `delete m_commands` on an
/// indeterminate pointer (undefined behavior). The copy now starts out in a
/// well-defined "no open stream" state.
FFmpegImageStream::FFmpegImageStream(const FFmpegImageStream & image, const osg::CopyOp & copyop) :
    osg::ImageStream(image, copyop),
    m_decoder(0),
    m_commands(0),
    m_frame_published_flag(false)
{
    // TODO: probably incorrect or incomplete — the decoder state and command
    // queue of 'image' are not duplicated.
}
// Destructor: stops the packet-producer thread and the decoder threads
// before tearing down the members they use.
FFmpegImageStream::~FFmpegImageStream()
{
osg::notify(osg::NOTICE)<<"Destructing FFMpegImageStream..."<<std::endl;
quit(true);
osg::notify(osg::NOTICE)<<"Have done quit"<<std::endl;
// release the audio streams to make sure that the decoder doesn't retain any external
// references.
getAudioStreams().clear();
// destroy the decoder and associated threads
m_decoder = 0;
delete m_commands;
osg::notify(osg::NOTICE)<<"Destructed FFMpegImageStream."<<std::endl;
}
// Opens the media file, wires the video publish callback and the optional
// audio stream, then starts the packet-producer thread (paused).
// Returns false if the decoder cannot open the file.
bool FFmpegImageStream::open(const std::string & filename)
{
setFileName(filename);
if (! m_decoder->open(filename))
return false;
// Expose the decoder's RGBA double buffer as this image's pixel data;
// NO_DELETE because the buffer remains owned by the video decoder.
setImage(
m_decoder->video_decoder().width(), m_decoder->video_decoder().height(), 1, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE,
const_cast<unsigned char *>(m_decoder->video_decoder().image()), NO_DELETE
);
setOrigin(osg::Image::TOP_LEFT);
// publishNewFrame() will be invoked on every decoded video frame.
m_decoder->video_decoder().setUserData(this);
m_decoder->video_decoder().setPublishCallback(publishNewFrame);
if (m_decoder->audio_decoder().validContext())
{
osg::notify(osg::NOTICE)<<"Attaching FFmpegAudioStream"<<std::endl;
getAudioStreams().push_back(new FFmpegAudioStream(m_decoder.get()));
}
_status = PAUSED;
applyLoopingMode();
start(); // start thread
return true;
}
// Queue a play request for the packet-producer thread.
void FFmpegImageStream::play()
{
m_commands->push(CMD_PLAY);
#if 0
// Wait for at least one frame to be published before returning the call
OpenThreads::ScopedLock<Mutex> lock(m_mutex);
while (duration() > 0 && ! m_frame_published_flag)
m_frame_published_cond.wait(&m_mutex);
#endif
}
// Queue a pause request.
void FFmpegImageStream::pause()
{
m_commands->push(CMD_PAUSE);
}
// Queue a rewind-to-start request.
void FFmpegImageStream::rewind()
{
m_commands->push(CMD_REWIND);
}
// Stops the packet-producer thread and the decoder threads.
// @param waitForThreadToExit  when true, blocks until all threads have exited.
void FFmpegImageStream::quit(bool waitForThreadToExit)
{
// Stop the packet producer thread
if (isRunning())
{
m_commands->push(CMD_STOP);
if (waitForThreadToExit)
join();
}
// Close the decoder (i.e. flush the decoder packet queues)
m_decoder->close(waitForThreadToExit);
}
// Stream duration in seconds.
double FFmpegImageStream::duration() const
{
return m_decoder->duration();
}
// True if the video source carries an alpha channel (YUVA420P).
bool FFmpegImageStream::videoAlphaChannel() const
{
return m_decoder->video_decoder().alphaChannel();
}
// Display aspect ratio of the video.
double FFmpegImageStream::videoAspectRatio() const
{
return m_decoder->video_decoder().aspectRatio();
}
// Nominal frame rate of the video, in frames per second.
double FFmpegImageStream::videoFrameRate() const
{
return m_decoder->video_decoder().frameRate();
}
// Packet-producer thread: while PLAYING, alternates between handling queued
// commands and demultiplexing packets; while paused, blocks on the command
// queue. Exits when handleCommand() returns false (CMD_STOP).
void FFmpegImageStream::run()
{
try
{
bool done = false;
while (! done)
{
if (_status == PLAYING)
{
bool no_cmd;
// Poll for a command with a short timeout so packets keep flowing.
const Command cmd = m_commands->timedPop(no_cmd, 1);
if (no_cmd)
{
m_decoder->readNextPacket();
}
else
done = ! handleCommand(cmd);
}
else
{
// Paused: block until the next command arrives.
done = ! handleCommand(m_commands->pop());
}
}
}
catch (const std::exception & error)
{
osg::notify(osg::WARN) << "FFmpegImageStream::run : " << error.what() << std::endl;
}
catch (...)
{
osg::notify(osg::WARN) << "FFmpegImageStream::run : unhandled exception" << std::endl;
}
osg::notify(osg::NOTICE)<<"Finished FFmpegImageStream::run()"<<std::endl;
}
// Propagates the osg::ImageStream looping mode to the decoder.
void FFmpegImageStream::applyLoopingMode()
{
m_decoder->loop(getLoopingMode() == LOOPING);
}
// Dispatches one queued command; returns false only for CMD_STOP,
// which terminates the run() loop.
bool FFmpegImageStream::handleCommand(const Command cmd)
{
switch (cmd)
{
case CMD_PLAY:
cmdPlay();
return true;
case CMD_PAUSE:
cmdPause();
return true;
case CMD_REWIND:
cmdRewind();
return true;
case CMD_STOP:
return false;
default:
// Unknown command is a programming error.
// NOTE(review): no <cassert> include is visible in this file — presumably
// pulled in transitively; confirm.
assert(false);
return false;
}
}
// Starts the audio/video decoder threads on first play, then switches to PLAYING.
void FFmpegImageStream::cmdPlay()
{
if (_status == PAUSED)
{
if (! m_decoder->audio_decoder().isRunning())
m_decoder->audio_decoder().start();
if (! m_decoder->video_decoder().isRunning())
m_decoder->video_decoder().start();
}
_status = PLAYING;
}
// Switches to PAUSED; run() then stops feeding packets to the decoders.
void FFmpegImageStream::cmdPause()
{
if (_status == PLAYING)
{
}
_status = PAUSED;
}
// Asks the decoder to seek back to the start of the stream.
void FFmpegImageStream::cmdRewind()
{
m_decoder->rewind();
}
// Publish callback invoked by the video decoder thread for each new frame:
// re-points the image data at the freshly published buffer, dirties it, and
// signals the first-frame condition used by the (disabled) blocking play().
void FFmpegImageStream::publishNewFrame(const FFmpegDecoderVideo &, void * user_data)
{
FFmpegImageStream * const this_ = reinterpret_cast<FFmpegImageStream*>(user_data);
#if 1
this_->setImage(
this_->m_decoder->video_decoder().width(), this_->m_decoder->video_decoder().height(), 1, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE,
const_cast<unsigned char *>(this_->m_decoder->video_decoder().image()), NO_DELETE
);
#else
/** \bug If viewer.realize() hasn't been already called, this doesn't work? */
this_->dirty();
#endif
OpenThreads::ScopedLock<Mutex> lock(this_->m_mutex);
if (! this_->m_frame_published_flag)
{
this_->m_frame_published_flag = true;
this_->m_frame_published_cond.signal();
}
}
} // namespace osgFFmpeg

View File

@@ -0,0 +1,80 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_IMAGE_STREAM_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_IMAGE_STREAM_H
#include <osg/ImageStream>
#include <OpenThreads/Condition>
#include <OpenThreads/Thread>
#include "FFmpegDecoder.hpp"
#include "MessageQueue.hpp"
namespace osgFFmpeg
{
template <class T>
class MessageQueue;
// osg::ImageStream implementation backed by the FFmpeg decoder. The object is
// itself a thread (the packet producer) controlled through a command queue.
class FFmpegImageStream : public osg::ImageStream, public OpenThreads::Thread
{
public:
FFmpegImageStream();
FFmpegImageStream(const FFmpegImageStream & image, const osg::CopyOp & copyop = osg::CopyOp::SHALLOW_COPY);
META_Object(osgFFmpeg, FFmpegImageStream);
// Opens the media file and starts the producer thread (paused).
bool open(const std::string & filename);
virtual void play();
virtual void pause();
virtual void rewind();
virtual void quit(bool waitForThreadToExit = true);
double duration() const;
bool videoAlphaChannel() const;
double videoAspectRatio() const;
double videoFrameRate() const;
private:
// Commands posted from the public API to the producer thread.
enum Command
{
CMD_PLAY,
CMD_PAUSE,
CMD_STOP,
CMD_REWIND
};
typedef MessageQueue<Command> CommandQueue;
typedef OpenThreads::Mutex Mutex;
typedef OpenThreads::Condition Condition;
virtual ~FFmpegImageStream();
virtual void run();
virtual void applyLoopingMode();
bool handleCommand(Command cmd);
void cmdPlay();
void cmdPause();
void cmdRewind();
// Callback invoked by the video decoder thread for each published frame.
static void publishNewFrame(const FFmpegDecoderVideo &, void * user_data);
osg::ref_ptr<FFmpegDecoder> m_decoder;
CommandQueue * m_commands;            // owned; deleted in the destructor
Mutex m_mutex;                        // guards m_frame_published_flag
Condition m_frame_published_cond;     // signalled on the first published frame
bool m_frame_published_flag;
};
}
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_IMAGE_STREAM_H

View File

@@ -0,0 +1,82 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_PACKET_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_PACKET_H
#include "FFmpegHeaders.hpp"
namespace osgFFmpeg
{
// Message carried by the packet queues: either a demultiplexed AVPacket
// (PACKET_DATA) or a control message (end-of-stream / flush).
struct FFmpegPacket
{
enum Type
{
PACKET_DATA,
PACKET_END_OF_STREAM,
PACKET_FLUSH
};
FFmpegPacket() :
type(PACKET_DATA)
{
packet.data = 0;
}
explicit FFmpegPacket(const Type type) :
type(type)
{
packet.data = 0;
}
explicit FFmpegPacket(const AVPacket & packet) :
packet(packet),
type(PACKET_DATA)
{
}
// Releases the AVPacket payload (if any) and resets to the default state.
void clear()
{
if (packet.data != 0)
av_free_packet(&packet);
release();
}
// Forgets the payload WITHOUT freeing it (ownership passed elsewhere).
void release()
{
packet.data = 0;
type = PACKET_DATA;
}
// Validity invariant (XOR): a data packet is valid iff it carries data;
// a control packet (EOS/FLUSH) is valid iff it carries none.
bool valid() const
{
return (type != PACKET_DATA) ^ (packet.data != 0);
}
bool operator ! () const
{
return ! valid();
}
AVPacket packet;
Type type;
};
// Functor used by the queues to dispose of remaining packets.
struct FFmpegPacketClear
{
void operator () (FFmpegPacket & packet) const
{
packet.clear();
}
};
}
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_PACKET_H

View File

@@ -0,0 +1,23 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_SAMPLE_FORMAT_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_SAMPLE_FORMAT_H
namespace osgFFmpeg
{
// Plugin-local mirror of FFmpeg's SampleFormat values; kept numerically in
// sync with the commented SAMPLE_FMT_* constants so casts remain valid.
enum FFmpegSampleFormat
{
SAMPLE_FORMAT_U8, //= SAMPLE_FMT_U8,
SAMPLE_FORMAT_S16, //= SAMPLE_FMT_S16,
SAMPLE_FORMAT_S24, //= SAMPLE_FMT_S24,
SAMPLE_FORMAT_S32, //= SAMPLE_FMT_S32,
SAMPLE_FORMAT_F32 //= SAMPLE_FMT_FLT
};
}
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_SAMPLE_FORMAT_H

View File

@@ -0,0 +1,156 @@
#ifndef HEADER_GUARD_OSGFFMPEG_MESSAGE_QUEUE_H
#define HEADER_GUARD_OSGFFMPEG_MESSAGE_QUEUE_H
#include <OpenThreads/Condition>
#include <OpenThreads/Mutex>
#include <OpenThreads/ScopedLock>
#include <deque>
namespace osgFFmpeg {
// Unbounded thread-safe FIFO used to pass commands/packets between threads.
// Producers push(); consumers pop() (blocking), tryPop() (non-blocking) or
// timedPop() (bounded wait).
template <class T>
class MessageQueue
{
public:
typedef T value_type;
typedef size_t size_type;
MessageQueue();
~MessageQueue();
// Discards all queued items.
void clear();
// Enqueue a value and wake one waiting consumer.
void push(const T & value);
// Blocks until an item is available.
value_type pop();
// Non-blocking pop; is_empty reports whether a value was returned.
value_type tryPop(bool & is_empty);
// Waits up to 'ms' milliseconds for an item; is_empty reports the outcome.
value_type timedPop(bool & is_empty, unsigned long ms);
private:
MessageQueue(const MessageQueue &);
MessageQueue & operator = (const MessageQueue &);
typedef std::deque<T> Queue;
typedef OpenThreads::Condition Condition;
typedef OpenThreads::Mutex Mutex;
typedef OpenThreads::ScopedLock<Mutex> ScopedLock;
Mutex m_mutex;           // guards m_queue
Condition m_not_empty;   // signalled by push()
Queue m_queue;
};
template <class T>
MessageQueue<T>::MessageQueue()
{
}
template <class T>
MessageQueue<T>::~MessageQueue()
{
}
template <class T>
void MessageQueue<T>::clear()
{
ScopedLock lock(m_mutex);
m_queue.clear();
}
template <class T>
void MessageQueue<T>::push(const T & value)
{
{
// Scope the lock so the signal below happens after the mutex is released.
ScopedLock lock(m_mutex);
m_queue.push_back(value);
}
m_not_empty.signal();
}
template <class T>
typename MessageQueue<T>::value_type MessageQueue<T>::pop()
{
ScopedLock lock(m_mutex);
// Loop to guard against spurious wake-ups.
while (m_queue.empty())
m_not_empty.wait(&m_mutex);
const value_type value = m_queue.front();
m_queue.pop_front();
return value;
}
template <class T>
typename MessageQueue<T>::value_type MessageQueue<T>::tryPop(bool & is_empty)
{
ScopedLock lock(m_mutex);
is_empty = m_queue.empty();
if (is_empty)
return value_type();
const value_type value = m_queue.front();
m_queue.pop_front();
return value;
}
template <class T>
typename MessageQueue<T>::value_type MessageQueue<T>::timedPop(bool & is_empty, const unsigned long ms)
{
ScopedLock lock(m_mutex);
// We don't wait in a loop to avoid an infinite loop (as the ms timeout would not be decremented).
// This means that timedPop() could return with (is_empty = true) before the timeout has been hit.
if (m_queue.empty())
m_not_empty.wait(&m_mutex, ms);
is_empty = m_queue.empty();
if (is_empty)
return value_type();
const value_type value = m_queue.front();
m_queue.pop_front();
return value;
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_MESSAGE_QUEUE_H

View File

@@ -0,0 +1,94 @@
/* -*-c++-*- OpenSceneGraph - Copyright (C) 1998-2006 Robert Osfield
*
* This library is open source and may be redistributed and/or modified under
* the terms of the OpenSceneGraph Public License (OSGPL) version 0.0 or
* (at your option) any later version. The full license is in LICENSE file
* included with this distribution, and on the openscenegraph.org website.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* OpenSceneGraph Public License for more details.
*/
#include <osgDB/Registry>
#include <osgDB/FileNameUtils>
#include <osgDB/FileUtils>
#include "FFmpegHeaders.hpp"
#include "FFmpegImageStream.hpp"
/** Implementation heavily inspired by http://www.dranger.com/ffmpeg/ */
// osgDB plugin entry point: creates an FFmpegImageStream for supported
// media extensions, server addresses and /dev/ capture devices.
class ReaderWriterFFmpeg : public osgDB::ReaderWriter
{
public:
ReaderWriterFFmpeg()
{
supportsProtocol("http","Read video/audio from http using ffmpeg.");
supportsExtension("avi", "");
supportsExtension("flv", "");
supportsExtension("mov", "");
supportsExtension("ogg", "Theora movie format");
supportsExtension("mpg", "Mpeg movie format");
supportsExtension("mpv", "Mpeg movie format");
supportsExtension("wmv", "");
// Register all FFmpeg formats/codecs
av_register_all();
}
virtual ~ReaderWriterFFmpeg()
{
}
virtual const char * className() const
{
return "ReaderWriterFFmpeg";
}
// Resolves the file (local lookup unless it is a device or server address)
// and opens it as an image stream.
virtual ReadResult readImage(const std::string & filename, const osgDB::ReaderWriter::Options * options) const
{
// Capture devices (e.g. /dev/video0) bypass the extension/path checks.
if (filename.compare(0, 5, "/dev/")==0)
{
return readImageStream(filename, options);
}
const std::string ext = osgDB::getLowerCaseFileExtension(filename);
if (! acceptsExtension(ext))
return ReadResult::FILE_NOT_HANDLED;
// Remote URLs are passed through to ffmpeg; local files must exist.
const std::string path = osgDB::containsServerAddress(filename) ?
filename :
osgDB::findDataFile(filename, options);
if (path.empty())
return ReadResult::FILE_NOT_FOUND;
return readImageStream(filename, options);
}
// Creates and opens the stream; FILE_NOT_HANDLED if ffmpeg cannot open it.
ReadResult readImageStream(const std::string& filename, const osgDB::ReaderWriter::Options * options) const
{
osg::notify(osg::INFO) << "ReaderWriterFFmpeg::readImage " << filename << std::endl;
osg::ref_ptr<osgFFmpeg::FFmpegImageStream> image_stream(new osgFFmpeg::FFmpegImageStream);
if (! image_stream->open(filename))
return ReadResult::FILE_NOT_HANDLED;
return image_stream.release();
}
private:
};
// Registers this plugin with the osgDB registry under the "ffmpeg" name.
REGISTER_OSGPLUGIN(ffmpeg, ReaderWriterFFmpeg)