added preview option to show decoded stream in Audio config

csoler 2015-08-09 18:09:17 -04:00
parent 2107a1c858
commit c0614e70ac
8 changed files with 195 additions and 63 deletions

View File

@@ -55,9 +55,9 @@ void AudioInputDialog::showEvent(QShowEvent *) {
 class voipGraphSource: public RSGraphSource
 {
 public:
-    voipGraphSource() {}
+    voipGraphSource() : video_input(NULL) {}

-    void setVideoInput(QVideoInputDevice *vid) { video_input = vid ; }
+    void setVideoInput(const QVideoInputDevice *vid) { video_input = vid ; }

     virtual QString displayName(int) const { return tr("Required bandwidth") ;}
@@ -73,21 +73,14 @@ public:
     virtual void getValues(std::map<std::string,float>& vals) const
     {
-        RsVOIPDataChunk chunk ;
-        uint32_t total_size = 0 ;
         vals.clear() ;

-        while(video_input && video_input->getNextEncodedPacket(chunk))
-        {
-            total_size += chunk.size ;
-            chunk.clear() ;
-        }
-
-        vals[std::string("bw")] = (float)total_size ;
+        if(video_input)
+            vals[std::string("bw")] = video_input->currentBandwidth() ;
     }

 private:
-    QVideoInputDevice *video_input ;
+    const QVideoInputDevice *video_input ;
 };

 void voipGraph::setVoipSource(voipGraphSource *gs)
@@ -127,12 +120,31 @@ AudioInputConfig::AudioInputConfig(QWidget * parent, Qt::WindowFlags flags)
     videoInput->setEchoVideoTarget(ui.videoDisplay) ;
     videoInput->setVideoEncoder(new JPEGVideoEncoder()) ;

+    videoDecoder = new JPEGVideoDecoder;
+    videoDecoder->setDisplayTarget(NULL) ;

     graph_source = new voipGraphSource ;
     ui.voipBwGraph->setSource(graph_source);

     graph_source->setVideoInput(videoInput) ;
     graph_source->setCollectionTimeLimit(1000*300) ;
     graph_source->start() ;

+    QObject::connect(ui.showEncoded_CB,SIGNAL(toggled(bool)),this,SLOT(togglePreview(bool))) ;
+}
+
+void AudioInputConfig::togglePreview(bool b)
+{
+    if(b)
+    {
+        videoInput->setEchoVideoTarget(NULL) ;
+        videoDecoder->setDisplayTarget(ui.videoDisplay) ;
+    }
+    else
+    {
+        videoInput->setEchoVideoTarget(ui.videoDisplay) ;
+        videoDecoder->setDisplayTarget(NULL) ;
+    }
 }

 AudioInputConfig::~AudioInputConfig()
@@ -352,6 +364,15 @@ void AudioInputConfig::on_Tick_timeout() {
     abSpeech->iValue = iroundf(inputAudioProcessor->dVoiceAcivityLevel * 32767.0f + 0.5f);
     abSpeech->update();

+    // also pass the encoded video through the local decoder, for the preview
+    RsVOIPDataChunk chunk ;
+
+    while(videoInput->getNextEncodedPacket(chunk))
+    {
+        videoDecoder->receiveEncodedData(static_cast<const unsigned char*>(chunk.data),chunk.size) ;
+        chunk.clear() ;
+    }
 }

 void AudioInputConfig::emptyBuffer() {
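
In short, the new preview path works like this: when the "preview" box is checked, the raw camera echo is switched off and each grabbed frame instead takes the full encode/decode round trip before being displayed, so the config page shows exactly what the codec produces. Below is a minimal wiring sketch, assuming ui.videoDisplay is a QVideoOutputDevice* as suggested by the hunks above; the helper function is illustrative only and not part of the commit.

    // Hypothetical helper showing the preview wiring; all calls used here appear in the diff above.
    void previewSketch(QVideoInputDevice *videoInput, VideoDecoder *videoDecoder,
                       QVideoOutputDevice *videoDisplay, bool preview)
    {
        videoInput->setEchoVideoTarget(preview ? NULL : videoDisplay) ;  // raw camera echo off while previewing
        videoDecoder->setDisplayTarget(preview ? videoDisplay : NULL) ;  // decoded frames go to the widget instead

        // Periodically (the config page uses its timer tick) drain the encoder's
        // output queue and feed it straight back into the local decoder.
        RsVOIPDataChunk chunk ;
        while(videoInput->getNextEncodedPacket(chunk))
        {
            videoDecoder->receiveEncodedData(static_cast<const unsigned char*>(chunk.data),chunk.size) ;
            chunk.clear() ;   // releases the chunk's malloc'ed payload
        }
    }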

View File

@@ -69,6 +69,7 @@ class AudioInputConfig : public ConfigPage
     //VideoDecoder *videoDecoder ;
     //VideoEncoder *videoEncoder ;
     QVideoInputDevice *videoInput ;
+    VideoDecoder *videoDecoder ;
     bool loaded;

     voipGraphSource *graph_source ;
@@ -96,6 +97,7 @@ class AudioInputConfig : public ConfigPage
 private slots:
     void loadSettings();
     void emptyBuffer();
+    void togglePreview(bool) ;

     void on_qsTransmitHold_valueChanged(int v);
     void on_qsAmp_valueChanged(int v);

View File

@@ -391,6 +391,16 @@
     </layout>
    </widget>
   </item>
+  <item>
+   <widget class="QCheckBox" name="showEncoded_CB">
+    <property name="toolTip">
+     <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Display encoded (and then decoded) frame, to check the codec's quality. If not selected, the image above only shows the frame that is grabbed from your camera.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+    </property>
+    <property name="text">
+     <string>preview</string>
+    </property>
+   </widget>
+  </item>
   <item>
    <spacer>
     <property name="orientation">

View File

@@ -13,6 +13,9 @@ QVideoInputDevice::QVideoInputDevice(QWidget *parent)
     _capture_device = NULL ;
     _video_encoder = NULL ;
     _echo_output_device = NULL ;
+    _estimated_bw = 0 ;
+    _total_encoded_size = 0 ;
+    _last_bw_estimate_TS = time(NULL) ;
 }

 void QVideoInputDevice::stop()
@@ -73,17 +76,33 @@ void QVideoInputDevice::grabFrame()
     cv::Mat img_rgb;
     cv::cvtColor(cv::Mat(img), img_rgb, CV_BGR2RGB);

-    static const int _encoded_width = 128 ;
-    static const int _encoded_height = 128 ;
-
-    QImage image = QImage(img_rgb.data,img_rgb.cols,img_rgb.rows,QImage::Format_RGB888).scaled(QSize(_encoded_width,_encoded_height),Qt::IgnoreAspectRatio,Qt::SmoothTransformation) ;
+    QImage image = QImage(img_rgb.data,img_rgb.cols,img_rgb.rows,QImage::Format_RGB888);

     if(_video_encoder != NULL)
     {
-        _video_encoder->addImage(image) ;
+        uint32_t encoded_size ;
+        _video_encoder->addImage(image,0,encoded_size) ;
+        std::cerr << "Encoded size = " << encoded_size << std::endl;
+
+        _total_encoded_size += encoded_size ;
+
+        time_t now = time(NULL) ;
+
+        if(now > _last_bw_estimate_TS)
+        {
+            _estimated_bw = uint32_t(0.75*_estimated_bw + 0.25 * (_total_encoded_size / (float)(now - _last_bw_estimate_TS))) ;
+            _total_encoded_size = 0 ;
+            _last_bw_estimate_TS = now ;
+
+            std::cerr << "new bw estimate: " << _estimated_bw << std::endl;
+        }
+
         emit networkPacketReady() ;
     }

-    if(_echo_output_device != NULL) _echo_output_device->showFrame(image) ;
+    if(_echo_output_device != NULL)
+        _echo_output_device->showFrame(image) ;
 }

 bool QVideoInputDevice::getNextEncodedPacket(RsVOIPDataChunk& chunk)
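
The value returned by currentBandwidth() (declared in the header below) is an exponential moving average: the bytes produced by the encoder are accumulated, and roughly once per second the measured rate is blended into the previous estimate with weights 0.75/0.25. Here is a self-contained sketch of that logic, using the same constants as the diff; the struct and function names are illustrative only.

    #include <stdint.h>
    #include <time.h>

    struct BwEstimator
    {
        uint32_t estimated_bw ;        // smoothed estimate, in bytes per second
        uint32_t total_encoded_size ;  // bytes accumulated since the last update
        time_t   last_update ;

        BwEstimator() : estimated_bw(0), total_encoded_size(0), last_update(time(NULL)) {}

        void addPacket(uint32_t encoded_size)
        {
            total_encoded_size += encoded_size ;
            time_t now = time(NULL) ;

            if(now > last_update)      // update at most once per second
            {
                float instant_bw = total_encoded_size / (float)(now - last_update) ;
                // exponential moving average: 75% previous estimate, 25% new measurement
                estimated_bw = uint32_t(0.75f*estimated_bw + 0.25f*instant_bw) ;
                total_encoded_size = 0 ;
                last_update = now ;
            }
        }
    };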

View File

@@ -42,6 +42,12 @@ class QVideoInputDevice: public QObject
     //
     bool getNextEncodedPacket(RsVOIPDataChunk&) ;

+    // gets the estimated current bandwidth required to transmit the encoded data, in B/s
+    //
+    uint32_t currentBandwidth() const { return _estimated_bw ; }
+
+    // control
+
     void start() ;
     void stop() ;
@@ -59,5 +65,9 @@ class QVideoInputDevice: public QObject
     QVideoOutputDevice *_echo_output_device ;
     std::list<RsVOIPDataChunk> _out_queue ;

+    uint32_t _estimated_bw ;
+    time_t _last_bw_estimate_TS;
+    uint32_t _total_encoded_size ;
 };

View File

@@ -12,9 +12,15 @@ VideoDecoder::VideoDecoder()
     _output_device = NULL ;
 }

-bool VideoEncoder::addImage(const QImage& img)
+VideoEncoder::VideoEncoder()
+    :_frame_size(128,128)
 {
-    encodeData(img) ;
+}
+
+bool VideoEncoder::addImage(const QImage& img,uint32_t size_hint,uint32_t& encoded_size)
+{
+    std::cerr << "reducing to " << _frame_size.width() << " x " << _frame_size.height() << std::endl;
+    encodeData(img.scaled(_frame_size,Qt::IgnoreAspectRatio,Qt::SmoothTransformation),size_hint,encoded_size) ;

     return true ;
 }
@@ -32,6 +38,7 @@ bool VideoEncoder::nextPacket(RsVOIPDataChunk& chunk)
 void VideoDecoder::receiveEncodedData(const unsigned char *data,uint32_t size)
 {
+    if(_output_device)
         _output_device->showFrame(decodeData(data,size)) ;
 }
@@ -53,14 +60,42 @@ void VideoEncoder::setMaximumFrameRate(uint32_t bytes_per_sec)
     std::cerr << "Video Encoder: maximum frame rate is set to " << bytes_per_sec << " Bps" << std::endl;
 }

-void JPEGVideoEncoder::encodeData(const QImage& image)
+void VideoEncoder::setInternalFrameSize(QSize s)
 {
+    _frame_size = s ;
+}
+
+JPEGVideoEncoder::JPEGVideoEncoder()
+    : _ref_frame_max_distance(10),_ref_frame_count(10)
+{
+}
+
+void JPEGVideoEncoder::encodeData(const QImage& image,uint32_t /* size_hint */,uint32_t& encoded_size)
+{
+    // check if we make a diff image, or if we use the full frame.
+
+    QImage encoded_frame ;
+
+    if(_ref_frame_count++ < _ref_frame_max_distance && image.size() == _reference_frame.size())
+    {
+        // compute difference with reference frame.
+        encoded_frame = image ;
+
+        for(uint32_t i=0;i<image.byteCount();++i)
+            encoded_frame.bits()[i] = image.bits()[i] - _reference_frame.bits()[i] + 128 ;
+    }
+    else
+    {
+        _ref_frame_count = 0 ;
+        _reference_frame = image ;
+        encoded_frame = image ;
+    }
+
     QByteArray qb ;
     QBuffer buffer(&qb) ;

     buffer.open(QIODevice::WriteOnly) ;

-    image.save(&buffer,"JPEG") ;
+    encoded_frame.save(&buffer,"JPEG") ;

     RsVOIPDataChunk voip_chunk ;
     voip_chunk.data = malloc(qb.size());
@@ -69,5 +104,7 @@ void JPEGVideoEncoder::encodeData(const QImage& image)
     voip_chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;
     _out_queue.push_back(voip_chunk) ;
+
+    encoded_size = voip_chunk.size ;
 }
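
The JPEG encoder now sends a full reference frame at most every 10 frames (_ref_frame_max_distance) and, in between, JPEG-compresses the per-byte difference to that reference, shifted by +128 so that a mostly static scene turns into a mostly flat gray image that compresses very well. The decoder side of this scheme is not visible in the hunks shown here (JPEGVideoDecoder only gains a _last_reference_frame member below), so the reconstruction step in this sketch is an assumption: modulo JPEG loss, it is simply the inverse offset-and-add.

    #include <QImage>

    // Encoder side, as in the diff: per-byte difference to the reference, shifted by 128.
    QImage makeDiffFrame(const QImage& image, const QImage& reference)
    {
        QImage diff = image ;
        for(int i=0;i<image.byteCount();++i)
            diff.bits()[i] = image.bits()[i] - reference.bits()[i] + 128 ;  // wraps mod 256
        return diff ;   // this is what gets saved as JPEG
    }

    // Decoder side (assumed, not shown in this commit): add the reference back.
    QImage reconstructFrame(const QImage& diff, const QImage& reference)
    {
        QImage out = diff ;
        for(int i=0;i<diff.byteCount();++i)
            out.bits()[i] = diff.bits()[i] + reference.bits()[i] - 128 ;    // inverse, mod 256
        return out ;
    }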

View File

@@ -49,12 +49,12 @@ class VideoDecoder
 class VideoEncoder
 {
 public:
-    VideoEncoder() {}
+    VideoEncoder() ;
     virtual ~VideoEncoder() {}

     // Takes the next image to be encoded.
     //
-    bool addImage(const QImage& Image) ;
+    bool addImage(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) ;

     bool packetReady() const { return !_out_queue.empty() ; }
     bool nextPacket(RsVOIPDataChunk& ) ;
@@ -62,12 +62,14 @@ class VideoEncoder
     // Used to tweak the compression ratio so that the video can stream ok.
     //
     void setMaximumFrameRate(uint32_t bytes_per_second) ;
+    void setInternalFrameSize(QSize) ;

 protected:
-    //virtual bool sendEncodedData(unsigned char *mem,uint32_t size) = 0 ;
-    virtual void encodeData(const QImage& image) = 0 ;
+    virtual void encodeData(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) =0;

     std::list<RsVOIPDataChunk> _out_queue ;
+    QSize _frame_size ;
 };

 // Now derive various image encoding/decoding algorithms.
@@ -77,14 +79,43 @@ class JPEGVideoDecoder: public VideoDecoder
 {
 protected:
     virtual QImage decodeData(const unsigned char *encoded_image,uint32_t encoded_image_size) ;
+
+private:
+    QImage _last_reference_frame ;
 };

 class JPEGVideoEncoder: public VideoEncoder
 {
 public:
-    JPEGVideoEncoder() {}
+    JPEGVideoEncoder() ;

 protected:
-    virtual void encodeData(const QImage& Image) ;
+    virtual void encodeData(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) ;
+
+private:
+    QImage _reference_frame ;
+    uint32_t _ref_frame_max_distance ;  // max distance between two reference frames.
+    uint32_t _ref_frame_count ;
 };
+
+class DifferentialWaveletEncoder: public VideoEncoder
+{
+public:
+    DifferentialWaveletEncoder() {}
+
+protected:
+    virtual void encodeData(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) ;
+};
+
+class DifferentialWaveletDecoder: public VideoDecoder
+{
+public:
+    DifferentialWaveletDecoder() {}
+
+protected:
+    virtual QImage decodeData(const unsigned char *encoded_image,uint32_t encoded_image_size) ;
+
+private:
+    QImage _last_reference_frame ;
+};

View File

@@ -51,7 +51,9 @@ class RsVOIPPongResult
 struct RsVOIPDataChunk
 {
-    typedef enum { RS_VOIP_DATA_TYPE_AUDIO, RS_VOIP_DATA_TYPE_VIDEO } RsVOIPDataType ;
+    typedef enum { RS_VOIP_DATA_TYPE_UNKNOWN = 0x00,
+                   RS_VOIP_DATA_TYPE_AUDIO   = 0x01,
+                   RS_VOIP_DATA_TYPE_VIDEO   = 0x02 } RsVOIPDataType ;

     void *data ;    // create/delete using malloc/free.
     uint32_t size ;
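
For reference, RsVOIPDataChunk carries its payload in a plain malloc'ed buffer, which is the convention JPEGVideoEncoder::encodeData() above relies on. A minimal usage sketch following that convention; the helper name is illustrative only.

    #include <stdlib.h>
    #include <string.h>

    // Builds a video chunk the same way JPEGVideoEncoder::encodeData() does.
    RsVOIPDataChunk makeVideoChunk(const void *encoded, uint32_t encoded_size)
    {
        RsVOIPDataChunk chunk ;
        chunk.data = malloc(encoded_size) ;                       // owned by the chunk
        memcpy(chunk.data, encoded, encoded_size) ;
        chunk.size = encoded_size ;
        chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;
        return chunk ;                                            // receiver frees it, presumably via clear()
    }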