diff --git a/plugins/VOIP/gui/AudioInputConfig.cpp b/plugins/VOIP/gui/AudioInputConfig.cpp
index 29a27026a..5f6aacfb8 100644
--- a/plugins/VOIP/gui/AudioInputConfig.cpp
+++ b/plugins/VOIP/gui/AudioInputConfig.cpp
@@ -34,6 +34,7 @@
 #include "AudioInputConfig.h"
 #include "audiodevicehelper.h"
 #include "AudioWizard.h"
+#include "gui/VideoProcessor.h"
 #include "gui/common/RSGraphWidget.h"
 #include "util/RsProtectedTimer.h"

@@ -99,35 +100,36 @@ voipGraph::voipGraph(QWidget *parent)
 AudioInputConfig::AudioInputConfig(QWidget * parent, Qt::WindowFlags flags)
     : ConfigPage(parent, flags)
 {
-    std::cerr << "Creating audioInputConfig object" << std::endl;
+    std::cerr << "Creating audioInputConfig object" << std::endl;

-    /* Invoke the Qt Designer generated object setup routine */
-    ui.setupUi(this);
+    /* Invoke the Qt Designer generated object setup routine */
+    ui.setupUi(this);

-    loaded = false;
+    loaded = false;

-    inputAudioProcessor = NULL;
-    inputAudioDevice = NULL;
-    abSpeech = NULL;
-    qtTick = NULL;
+    inputAudioProcessor = NULL;
+    inputAudioDevice = NULL;
+    abSpeech = NULL;
+    qtTick = NULL;

-    // Create the video pipeline.
-    //
-    videoInput = new QVideoInputDevice(this) ;
-    videoInput->setEchoVideoTarget(ui.videoDisplay) ;
-    videoInput->setVideoEncoder(new JPEGVideoEncoder()) ;
+    // Create the video pipeline.
+    //
+    videoInput = new QVideoInputDevice(this) ;
+    videoInput->setEchoVideoTarget(ui.videoDisplay) ;

-    videoDecoder = new JPEGVideoDecoder;
-    videoDecoder->setDisplayTarget(NULL) ;
+    videoProcessor = new VideoProcessor() ;
+    videoProcessor->setDisplayTarget(NULL) ;

-    graph_source = new voipGraphSource ;
-    ui.voipBwGraph->setSource(graph_source);
+    videoInput->setVideoProcessor(videoProcessor) ;

-    graph_source->setVideoInput(videoInput) ;
-    graph_source->setCollectionTimeLimit(1000*300) ;
-    graph_source->start() ;
+    graph_source = new voipGraphSource ;
+    ui.voipBwGraph->setSource(graph_source);

-    QObject::connect(ui.showEncoded_CB,SIGNAL(toggled(bool)),this,SLOT(togglePreview(bool))) ;
+    graph_source->setVideoInput(videoInput) ;
+    graph_source->setCollectionTimeLimit(1000*300) ;
+    graph_source->start() ;
+
+    QObject::connect(ui.showEncoded_CB,SIGNAL(toggled(bool)),this,SLOT(togglePreview(bool))) ;
 }

 void AudioInputConfig::togglePreview(bool b)
@@ -135,12 +137,12 @@ void AudioInputConfig::togglePreview(bool b)
     if(b)
     {
         videoInput->setEchoVideoTarget(NULL) ;
-        videoDecoder->setDisplayTarget(ui.videoDisplay) ;
+        videoProcessor->setDisplayTarget(ui.videoDisplay) ;
     }
     else
     {
         videoInput->setEchoVideoTarget(ui.videoDisplay) ;
-        videoDecoder->setDisplayTarget(NULL) ;
+        videoProcessor->setDisplayTarget(NULL) ;
     }
 }

@@ -367,7 +369,7 @@ void AudioInputConfig::on_Tick_timeout()
     {
         while(videoInput->getNextEncodedPacket(chunk))
         {
-            videoDecoder->receiveEncodedData(static_cast<const unsigned char *>(chunk.data),chunk.size) ;
+            videoProcessor->receiveEncodedData(chunk) ;
             chunk.clear() ;
         }
     }
diff --git a/plugins/VOIP/gui/AudioInputConfig.h b/plugins/VOIP/gui/AudioInputConfig.h
index faaadb63e..e838744b4 100644
--- a/plugins/VOIP/gui/AudioInputConfig.h
+++ b/plugins/VOIP/gui/AudioInputConfig.h
@@ -69,7 +69,7 @@ class AudioInputConfig : public ConfigPage
     //VideoDecoder *videoDecoder ;
     //VideoEncoder *videoEncoder ;
     QVideoInputDevice *videoInput ;
-    VideoDecoder *videoDecoder ;
+    VideoProcessor *videoProcessor ;

     bool loaded;

     voipGraphSource *graph_source ;
diff --git a/plugins/VOIP/gui/QVideoDevice.cpp b/plugins/VOIP/gui/QVideoDevice.cpp
index 3a7ee029e..e9ace94f9 100644
--- a/plugins/VOIP/gui/QVideoDevice.cpp
+++ b/plugins/VOIP/gui/QVideoDevice.cpp
@@ -11,7 +11,7 @@ QVideoInputDevice::QVideoInputDevice(QWidget *parent)
 {
     _timer = NULL ;
     _capture_device = NULL ;
-    _video_encoder = NULL ;
+    _video_processor = NULL ;
     _echo_output_device = NULL ;
     _estimated_bw = 0 ;
     _total_encoded_size = 0 ;
@@ -78,11 +78,11 @@ void QVideoInputDevice::grabFrame()

     QImage image = QImage(img_rgb.data,img_rgb.cols,img_rgb.rows,QImage::Format_RGB888);

-    if(_video_encoder != NULL)
+    if(_video_processor != NULL)
     {
         uint32_t encoded_size ;

-        _video_encoder->addImage(image,0,encoded_size) ;
+        _video_processor->processImage(image,0,encoded_size) ;

         std::cerr << "Encoded size = " << encoded_size << std::endl;
         _total_encoded_size += encoded_size ;
@@ -107,8 +107,8 @@ void QVideoInputDevice::grabFrame()

 bool QVideoInputDevice::getNextEncodedPacket(RsVOIPDataChunk& chunk)
 {
-    if(_video_encoder)
-        return _video_encoder->nextPacket(chunk) ;
+    if(_video_processor)
+        return _video_processor->nextEncodedPacket(chunk) ;
     else
         return false ;
 }
diff --git a/plugins/VOIP/gui/QVideoDevice.h b/plugins/VOIP/gui/QVideoDevice.h
index 513932487..d12fe5f38 100644
--- a/plugins/VOIP/gui/QVideoDevice.h
+++ b/plugins/VOIP/gui/QVideoDevice.h
@@ -2,6 +2,7 @@
 #include
 #include "interface/rsVOIP.h"
+#include "gui/VideoProcessor.h"

 class VideoEncoder ;
 class CvCapture ;
@@ -31,7 +32,7 @@ class QVideoInputDevice: public QObject

     // Captured images are sent to this encoder. Can be NULL.
     //
-    void setVideoEncoder(VideoEncoder *venc) { _video_encoder = venc ; }
+    void setVideoProcessor(VideoProcessor *venc) { _video_processor = venc ; }

     // All images received will be echoed to this target. We could use signal/slots, but it's
     // probably faster this way. Can be NULL.
@@ -58,7 +59,7 @@ class QVideoInputDevice: public QObject
     void networkPacketReady() ;

 private:
-    VideoEncoder *_video_encoder ;
+    VideoProcessor *_video_processor ;

     QTimer *_timer ;
     CvCapture *_capture_device ;
diff --git a/plugins/VOIP/gui/VOIPChatWidgetHolder.cpp b/plugins/VOIP/gui/VOIPChatWidgetHolder.cpp
index 1d94960d7..4d1f8ff49 100644
--- a/plugins/VOIP/gui/VOIPChatWidgetHolder.cpp
+++ b/plugins/VOIP/gui/VOIPChatWidgetHolder.cpp
@@ -123,8 +123,7 @@ VOIPChatWidgetHolder::VOIPChatWidgetHolder(ChatWidget *chatWidget, VOIPNotify *n
     inputAudioDevice = NULL ;
     inputVideoDevice = new QVideoInputDevice(mChatWidget) ;    // not started yet ;-)

-    inputVideoProcessor = new JPEGVideoEncoder ;
-    outputVideoProcessor = new JPEGVideoDecoder ;
+    videoProcessor = new VideoProcessor ;

     // Make a widget with two video devices, one for echo, and one for the talking peer.
     videoWidget = new QWidget(mChatWidget) ;
@@ -144,8 +143,8 @@ VOIPChatWidgetHolder::VOIPChatWidgetHolder(ChatWidget *chatWidget, VOIPNotify *n
     mChatWidget->addChatHorizontalWidget(videoWidget) ;

     inputVideoDevice->setEchoVideoTarget(echoVideoDevice) ;
-    inputVideoDevice->setVideoEncoder(inputVideoProcessor) ;
-    outputVideoProcessor->setDisplayTarget(outputVideoDevice) ;
+    inputVideoDevice->setVideoProcessor(videoProcessor) ;
+    videoProcessor->setDisplayTarget(outputVideoDevice) ;
 }

 VOIPChatWidgetHolder::~VOIPChatWidgetHolder()
@@ -154,8 +153,7 @@ VOIPChatWidgetHolder::~VOIPChatWidgetHolder()
         inputAudioDevice->stop() ;

     delete inputVideoDevice ;
-    delete inputVideoProcessor ;
-    delete outputVideoProcessor ;
+    delete videoProcessor ;

     button_map::iterator it = buttonMapTakeVideo.begin();
     while (it != buttonMapTakeVideo.end()) {
@@ -287,42 +285,50 @@ void VOIPChatWidgetHolder::toggleVideoCapture()

 void VOIPChatWidgetHolder::addVideoData(const RsPeerId &peer_id, QByteArray* array)
 {
-    if (!videoCaptureToggleButton->isChecked()) {
-        if (mChatWidget) {
-            QString buttonName = QString::fromUtf8(rsPeers->getPeerName(peer_id).c_str());
-            if (buttonName.isEmpty()) buttonName = "VoIP";//TODO maybe change all with GxsId
-            button_map::iterator it = buttonMapTakeVideo.find(buttonName);
-            if (it == buttonMapTakeVideo.end()){
-                mChatWidget->addChatMsg(true, tr("VoIP Status"), QDateTime::currentDateTime(), QDateTime::currentDateTime()
-                                        , tr("%1 inviting you to start a video conversation. do you want Accept or Decline the invitation?").arg(buttonName), ChatWidget::MSGTYPE_SYSTEM);
-                RSButtonOnText *button = mChatWidget->getNewButtonOnTextBrowser(tr("Accept Video Call"));
-                button->setToolTip(tr("Activate camera"));
-                button->setStyleSheet(QString("border: 1px solid #199909;")
-                                      .append("font-size: 12pt; color: white;")
-                                      .append("min-width: 128px; min-height: 24px;")
-                                      .append("border-radius: 6px;")
-                                      .append("background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 0.67, " "stop: 0 #22c70d, stop: 1 #116a06);")
+    if (!videoCaptureToggleButton->isChecked())
+    {
+        if (mChatWidget) {
+            QString buttonName = QString::fromUtf8(rsPeers->getPeerName(peer_id).c_str());
+            if (buttonName.isEmpty()) buttonName = "VoIP";//TODO maybe change all with GxsId
+            button_map::iterator it = buttonMapTakeVideo.find(buttonName);
+            if (it == buttonMapTakeVideo.end()){
+                mChatWidget->addChatMsg(true, tr("VoIP Status"), QDateTime::currentDateTime(), QDateTime::currentDateTime()
+                                        , tr("%1 inviting you to start a video conversation. do you want Accept or Decline the invitation?").arg(buttonName), ChatWidget::MSGTYPE_SYSTEM);
+                RSButtonOnText *button = mChatWidget->getNewButtonOnTextBrowser(tr("Accept Video Call"));
+                button->setToolTip(tr("Activate camera"));
+                button->setStyleSheet(QString("border: 1px solid #199909;")
+                                      .append("font-size: 12pt; color: white;")
+                                      .append("min-width: 128px; min-height: 24px;")
+                                      .append("border-radius: 6px;")
+                                      .append("background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 0.67, " "stop: 0 #22c70d, stop: 1 #116a06);")

-                );
+                );

-                button->updateImage();
+                button->updateImage();

-                connect(button,SIGNAL(clicked()),this,SLOT(startVideoCapture()));
-                connect(button,SIGNAL(mouseEnter()),this,SLOT(botMouseEnter()));
-                connect(button,SIGNAL(mouseLeave()),this,SLOT(botMouseLeave()));
+                connect(button,SIGNAL(clicked()),this,SLOT(startVideoCapture()));
+                connect(button,SIGNAL(mouseEnter()),this,SLOT(botMouseEnter()));
+                connect(button,SIGNAL(mouseLeave()),this,SLOT(botMouseLeave()));

-                buttonMapTakeVideo.insert(buttonName, button);
-            }
-        }
+                buttonMapTakeVideo.insert(buttonName, button);
+            }
+        }

-        //TODO make a sound for the incoming call
-//        soundManager->play(VOIP_SOUND_INCOMING_CALL);
-        if (mVOIPNotify) mVOIPNotify->notifyReceivedVoipVideoCall(peer_id);
+        //TODO make a sound for the incoming call
+        // soundManager->play(VOIP_SOUND_INCOMING_CALL);
+        if (mVOIPNotify) mVOIPNotify->notifyReceivedVoipVideoCall(peer_id);

-    } else {
-        outputVideoProcessor->receiveEncodedData((unsigned char *)array->data(),array->size()) ;
-    }
+    }
+    else
+    {
+        RsVOIPDataChunk chunk ;
+        chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;
+        chunk.size = array->size() ;
+        chunk.data = array->data() ;
+
+        videoProcessor->receiveEncodedData(chunk) ;
+    }
 }

 void VOIPChatWidgetHolder::botMouseEnter()
@@ -359,7 +365,7 @@ void VOIPChatWidgetHolder::botMouseLeave()

 void VOIPChatWidgetHolder::setAcceptedBandwidth(uint32_t bytes_per_sec)
 {
-    inputVideoProcessor->setMaximumFrameRate(bytes_per_sec) ;
+    videoProcessor->setMaximumFrameRate(bytes_per_sec) ;
 }

 void VOIPChatWidgetHolder::addAudioData(const RsPeerId &peer_id, QByteArray* array)
diff --git a/plugins/VOIP/gui/VOIPChatWidgetHolder.h b/plugins/VOIP/gui/VOIPChatWidgetHolder.h
index 791a2dcf8..350f49f2c 100644
--- a/plugins/VOIP/gui/VOIPChatWidgetHolder.h
+++ b/plugins/VOIP/gui/VOIPChatWidgetHolder.h
@@ -34,8 +34,7 @@ class QAudioInput;
 class QAudioOutput;

 class QVideoInputDevice ;
 class QVideoOutputDevice ;
-class VideoEncoder ;
-class VideoDecoder ;
+class VideoProcessor ;

 #define VOIP_SOUND_INCOMING_CALL "VOIP_incoming_call"
@@ -82,8 +81,7 @@ protected:
     QWidget *videoWidget ;    // pointer to call show/hide

-    VideoEncoder *inputVideoProcessor;
-    VideoDecoder *outputVideoProcessor;
+    VideoProcessor *videoProcessor;

     // Additional buttons to the chat bar
     QToolButton *audioListenToggleButton ;
diff --git a/plugins/VOIP/gui/VideoProcessor.cpp b/plugins/VOIP/gui/VideoProcessor.cpp
index b19ee03d0..b3d0a7c27 100644
--- a/plugins/VOIP/gui/VideoProcessor.cpp
+++ b/plugins/VOIP/gui/VideoProcessor.cpp
@@ -1,4 +1,5 @@
 #include
+#include
 #include
 #include

@@ -7,127 +8,182 @@
 #include "VideoProcessor.h"
 #include "QVideoDevice.h"

-VideoDecoder::VideoDecoder()
+VideoProcessor::VideoProcessor()
+    :_encoded_frame_size(256,256)
 {
-    _output_device = NULL ;
+    _decoded_output_device = NULL ;
 }

-VideoEncoder::VideoEncoder()
-    :_frame_size(256,256)
+bool VideoProcessor::processImage(const QImage& img,uint32_t size_hint,uint32_t& encoded_size)
 {
+    VideoCodec *codec ;
+
+    switch(_encoding_current_codec)
+    {
+        case VIDEO_PROCESSOR_CODEC_ID_JPEG_VIDEO: codec = &_jpeg_video_codec ;
+            break ;
+        case VIDEO_PROCESSOR_CODEC_ID_DDWT_VIDEO: codec = &_ddwt_video_codec ;
+            break ;
+        default:
+            codec = NULL ;
+    }
+
+    // std::cerr << "reducing to " << _frame_size.width() << " x " << _frame_size.height() << std::endl;
+
+    void *data = NULL;
+    encoded_size = 0 ;
+
+    if(codec)
+    {
+        RsVOIPDataChunk chunk ;
+
+        codec->encodeData(img.scaled(_encoded_frame_size,Qt::IgnoreAspectRatio,Qt::SmoothTransformation),size_hint,chunk) ;
+
+        if(chunk.size == 0)    // the codec might be buffering the frame for compression reasons
+            return true ;
+
+        _encoded_out_queue.push_back(chunk) ;
+
+        return true ;
+    }
+    else
+        return false ;
 }

-bool VideoEncoder::addImage(const QImage& img,uint32_t size_hint,uint32_t& encoded_size)
+bool VideoProcessor::nextEncodedPacket(RsVOIPDataChunk& chunk)
 {
-//    std::cerr << "reducing to " << _frame_size.width() << " x " << _frame_size.height() << std::endl;
-    encodeData(img.scaled(_frame_size,Qt::IgnoreAspectRatio,Qt::SmoothTransformation),size_hint,encoded_size) ;
-    //encodeData(img,size_hint,encoded_size) ;
-
-    return true ;
-}
-
-bool VideoEncoder::nextPacket(RsVOIPDataChunk& chunk)
-{
-    if(_out_queue.empty())
+    if(_encoded_out_queue.empty())
         return false ;

-    chunk = _out_queue.front() ;
-    _out_queue.pop_front() ;
+    chunk = _encoded_out_queue.front() ;
+    _encoded_out_queue.pop_front() ;

     return true ;
 }

-void VideoDecoder::receiveEncodedData(const unsigned char *data,uint32_t size)
+void VideoProcessor::setInternalFrameSize(QSize s)
 {
-    if(_output_device)
-        _output_device->showFrame(decodeData(data,size)) ;
+    _encoded_frame_size = s ;
 }

-QImage JPEGVideoDecoder::decodeData(const unsigned char *encoded_image_data,uint32_t size)
+void VideoProcessor::receiveEncodedData(const RsVOIPDataChunk& chunk)
 {
     static const int HEADER_SIZE = 4 ;

     // read frame type. Use first 4 bytes to give info about content.
+    //
+    //    Byte    Meaning    Values
+    //     00     Codec      CODEC_ID_JPEG_VIDEO    Basic Jpeg codec
+    //                       CODEC_ID_DDWT_VIDEO    Differential wavelet compression
+    //
+    //     01     Unused     Might be useful later
+    //
+    //    0203    Flags      Codec specific flags.
+    //

-    if(size < HEADER_SIZE)
+    if(chunk.size < HEADER_SIZE)
     {
-        std::cerr << "JPEGVideoDecoder::decodeData(): Too small a data packet. size=" << size << std::endl;
-        return QImage() ;
+        std::cerr << "JPEGVideoDecoder::decodeData(): Too small a data packet. size=" << chunk.size << std::endl;
+        return ;
     }

-    uint32_t flags = encoded_image_data[0] + (encoded_image_data[1] << 8) ;
+    uint32_t codid = ((unsigned char *)chunk.data)[0] + (((unsigned char *)chunk.data)[1] << 8) ;
+    uint16_t flags = ((unsigned char *)chunk.data)[2] + (((unsigned char *)chunk.data)[3] << 8) ;

-    // un-compress image data
+    VideoCodec *codec ;

-    QByteArray qb((char*)&encoded_image_data[HEADER_SIZE],(int)size - HEADER_SIZE) ;
-    QImage image ;
-    if(!image.loadFromData(qb,"JPEG"))
+    switch(codid)
     {
-        std::cerr << "image.loadFromData(): returned an error.: " << std::endl;
-        return QImage() ;
+        case VIDEO_PROCESSOR_CODEC_ID_JPEG_VIDEO: codec = &_jpeg_video_codec ;
+            break ;
+        case VIDEO_PROCESSOR_CODEC_ID_DDWT_VIDEO: codec = &_ddwt_video_codec ;
+            break ;
+        default:
+            codec = NULL ;
     }
+    QImage img ;

-    // now see if the frame is a differential frame, or just a reference frame.
+    if(codec != NULL)
+        codec->decodeData(chunk,img) ;

-    if(flags & JPEG_VIDEO_FLAGS_DIFFERENTIAL_FRAME)
-    {
-        if(_reference_frame.size() != image.size())
-        {
-            std::cerr << "Bad reference frame!" << std::endl;
-            return image ;
-        }
-
-        QImage res = _reference_frame ;
-
-        for(uint32_t i=0;i
+    if(_decoded_output_device != NULL)
+        _decoded_output_device->showFrame(img) ;
 }

-void VideoEncoder::setMaximumFrameRate(uint32_t bytes_per_sec)
+void VideoProcessor::setMaximumFrameRate(uint32_t bytes_per_sec)
 {
     std::cerr << "Video Encoder: maximum frame rate is set to " << bytes_per_sec << " Bps" << std::endl;
 }

-void VideoEncoder::setInternalFrameSize(QSize s)
-{
-    _frame_size = s ;
-}
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////

-JPEGVideoEncoder::JPEGVideoEncoder()
-    : _ref_frame_max_distance(50),_ref_frame_count(50)
+JPEGVideo::JPEGVideo()
+    : _encoded_ref_frame_max_distance(50),_encoded_ref_frame_count(50)
 {
 }

-void JPEGVideoEncoder::encodeData(const QImage& image,uint32_t /* size_hint */,uint32_t& encoded_size)
+bool JPEGVideo::decodeData(const RsVOIPDataChunk& chunk,QImage& image)
+{
+    // now see if the frame is a differential frame, or just a reference frame.
+
+    uint16_t codec = ((unsigned char *)chunk.data)[0] + (((unsigned char *)chunk.data)[1] << 8) ;
+    uint16_t flags = ((unsigned char *)chunk.data)[2] + (((unsigned char *)chunk.data)[3] << 8) ;
+
+    assert(codec == VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_JPEG_VIDEO) ;
+
+    // un-compress image data
+
+    QByteArray qb((char*)&((uint8_t*)chunk.data)[HEADER_SIZE],(int)chunk.size - HEADER_SIZE) ;
+
+    if(!image.loadFromData(qb,"JPEG"))
+    {
+        std::cerr << "image.loadFromData(): returned an error.: " << std::endl;
+        return false ;
+    }
+
+
+    if(flags & JPEG_VIDEO_FLAGS_DIFFERENTIAL_FRAME)
+    {
+        if(_decoded_reference_frame.size() != image.size())
+        {
+            std::cerr << "Bad reference frame!" << std::endl;
+            return false ;
+        }
+
+        QImage res = _decoded_reference_frame ;
+
+        for(uint32_t i=0;i
-    ((unsigned char *)voip_chunk.data)[1] = (flags >> 8) & 0xff ;
-    ((unsigned char *)voip_chunk.data)[2] = 0 ;
-    ((unsigned char *)voip_chunk.data)[3] = 0 ;
+    ((unsigned char *)voip_chunk.data)[0] = VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_JPEG_VIDEO & 0xff ;
+    ((unsigned char *)voip_chunk.data)[1] = (VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_JPEG_VIDEO >> 8) & 0xff ;
+    ((unsigned char *)voip_chunk.data)[2] = flags & 0xff ;
+    ((unsigned char *)voip_chunk.data)[3] = (flags >> 8) & 0xff ;

     memcpy(voip_chunk.data+HEADER_SIZE,qb.data(),qb.size()) ;
+    voip_chunk.size = HEADER_SIZE + qb.size() ;
     voip_chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;

-    _out_queue.push_back(voip_chunk) ;
-
-    encoded_size = voip_chunk.size ;
+    return true ;
 }
diff --git a/plugins/VOIP/gui/VideoProcessor.h b/plugins/VOIP/gui/VideoProcessor.h
index 42124b4f4..d103207af 100644
--- a/plugins/VOIP/gui/VideoProcessor.h
+++ b/plugins/VOIP/gui/VideoProcessor.h
@@ -6,58 +6,91 @@

 class QVideoOutputDevice ;

+class VideoCodec
+{
+public:
+    virtual bool encodeData(const QImage& Image, uint32_t size_hint, RsVOIPDataChunk& chunk) = 0;
+    virtual bool decodeData(const RsVOIPDataChunk& chunk,QImage& image) = 0;
+};
+
+// Now derive various image encoding/decoding algorithms.
+//
+
+class JPEGVideo: public VideoCodec
+{
+public:
+    JPEGVideo() ;
+
+protected:
+    virtual bool encodeData(const QImage& Image, uint32_t size_hint, RsVOIPDataChunk& chunk) ;
+    virtual bool decodeData(const RsVOIPDataChunk& chunk,QImage& image) ;
+
+    static const uint32_t HEADER_SIZE = 0x04 ;
+    static const uint32_t JPEG_VIDEO_FLAGS_DIFFERENTIAL_FRAME = 0x0001 ;
+private:
+    QImage _decoded_reference_frame ;
+    QImage _encoded_reference_frame ;
+
+    uint32_t _encoded_ref_frame_max_distance ;    // max distance between two reference frames.
+    uint32_t _encoded_ref_frame_count ;
+};
+
+class DifferentialWaveletVideo: public VideoCodec
+{
+public:
+    DifferentialWaveletVideo() {}
+
+protected:
+    virtual bool encodeData(const QImage& Image, uint32_t size_hint, RsVOIPDataChunk& chunk) { return true ; }
+    virtual bool decodeData(const RsVOIPDataChunk& chunk,QImage& image) { return true ; }
+
+private:
+    QImage _last_reference_frame ;
+};
+
 // This class decodes video from a stream. It keeps a queue of
 // decoded frame that needs to be retrieved using the getNextImage() method.
 //
-class VideoDecoder
+class VideoProcessor
 {
 public:
-    VideoDecoder() ;
-    virtual ~VideoDecoder() {}
+    VideoProcessor() ;
+    virtual ~VideoProcessor() {}

+    enum CodecId {
+        VIDEO_PROCESSOR_CODEC_ID_UNKNOWN    = 0x0000,
+        VIDEO_PROCESSOR_CODEC_ID_JPEG_VIDEO = 0x0001,
+        VIDEO_PROCESSOR_CODEC_ID_DDWT_VIDEO = 0x0002
+    };
+
+// =====================================================================================
+// =------------------------------------ DECODING -------------------------------------=
+// =====================================================================================
+
     // Gets the next image to be displayed. Once returned, the image should
     // be cleared from the incoming queue.
     //
-    void setDisplayTarget(QVideoOutputDevice *odev) { _output_device = odev ; }
-
-    virtual void receiveEncodedData(const unsigned char *data,uint32_t size) ;
+    void setDisplayTarget(QVideoOutputDevice *odev) { _decoded_output_device = odev ; }
+    virtual void receiveEncodedData(const RsVOIPDataChunk& chunk) ;

     // returns the current (measured) frame rate in bytes per second.
     //
-    uint32_t currentFrameRate() const;
+    uint32_t currentDecodingFrameRate() const;

 private:
-    QVideoOutputDevice *_output_device ;
+    QVideoOutputDevice *_decoded_output_device ;
+    std::list<QImage> _decoded_image_queue ;

-    std::list<QImage> _image_queue ;
-
-    // Incoming data is processed by a video codec and converted into images.
-    //
-    virtual QImage decodeData(const unsigned char *encoded_image,uint32_t encoded_image_size) = 0 ;
-
-//    // This buffer accumulated incoming encoded data, until a full packet is obtained,
-//    // since the stream might not send images at once. When incoming images are decoded, the
-//    // data is removed from the buffer.
-//    //
-//    unsigned char *buffer ;
-//    uint32_t buffer_size ;
-};
-
-// This class encodes video using a video codec (possibly homemade, or based on existing codecs)
-// and produces a data stream that is sent to the network transfer service (e.g. p3VOIP).
-//
-class VideoEncoder
-{
+// =====================================================================================
+// =------------------------------------ ENCODING -------------------------------------=
+// =====================================================================================
+
 public:
-    VideoEncoder() ;
-    virtual ~VideoEncoder() {}
-
     // Takes the next image to be encoded.
     //
-    bool addImage(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) ;
-
-    bool packetReady() const { return !_out_queue.empty() ; }
-    bool nextPacket(RsVOIPDataChunk& ) ;
+    bool processImage(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) ;
+    bool encodedPacketReady() const { return !_encoded_out_queue.empty() ; }
+    bool nextEncodedPacket(RsVOIPDataChunk& ) ;

     // Used to tweak the compression ratio so that the video can stream ok.
     //
@@ -65,63 +98,16 @@ class VideoEncoder
     void setInternalFrameSize(QSize) ;

 protected:
-    virtual void encodeData(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) =0;
-
-    std::list<RsVOIPDataChunk> _out_queue ;
+    std::list<RsVOIPDataChunk> _encoded_out_queue ;
+    QSize _encoded_frame_size ;
+
+// =====================================================================================
+// =------------------------------------- Codecs --------------------------------------=
+// =====================================================================================

-    QSize _frame_size ;
+    JPEGVideo _jpeg_video_codec ;
+    DifferentialWaveletVideo _ddwt_video_codec ;
+
+    uint16_t _encoding_current_codec ;
 };

-// Now derive various image encoding/decoding algorithms.
-//
-
-class JPEGVideoDecoder: public VideoDecoder
-{
-protected:
-    virtual QImage decodeData(const unsigned char *encoded_image,uint32_t encoded_image_size) ;
-
-    static const uint32_t HEADER_SIZE = 0x04 ;
-
-    static const uint32_t JPEG_VIDEO_FLAGS_DIFFERENTIAL_FRAME = 0x0001 ;
-private:
-    QImage _reference_frame ;
-};
-
-class JPEGVideoEncoder: public VideoEncoder
-{
-public:
-    JPEGVideoEncoder() ;
-
-protected:
-    virtual void encodeData(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) ;
-
-    static const uint32_t HEADER_SIZE = 0x04 ;
-
-    static const uint32_t JPEG_VIDEO_FLAGS_DIFFERENTIAL_FRAME = 0x0001 ;
-private:
-    QImage _reference_frame ;
-    uint32_t _ref_frame_max_distance ;    // max distance between two reference frames.
-    uint32_t _ref_frame_count ;
-};
-
-class DifferentialWaveletEncoder: public VideoEncoder
-{
-public:
-    DifferentialWaveletEncoder() {}
-
-protected:
-    virtual void encodeData(const QImage& Image, uint32_t size_hint, uint32_t &encoded_size) ;
-
-};
-
-class DifferentialWaveletDecoder: public VideoDecoder
-{
-public:
-    DifferentialWaveletDecoder() {}
-
-protected:
-    virtual QImage decodeData(const unsigned char *encoded_image,uint32_t encoded_image_size) ;
-
-private:
-    QImage _last_reference_frame ;
-};
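
Note on the wire format used above: the 4-byte header documented in VideoProcessor::receiveEncodedData() is written byte by byte in JPEGVideo::encodeData() and parsed the same way on reception. The small sketch below exercises that layout in isolation; packHeader(), parseHeader() and the main() driver are illustrative helpers rather than functions from the patch, and the values are the ones visible above (codec id 0x0001 for the JPEG codec, flag 0x0001 for a differential frame).

// Illustrative sketch, not part of the patch: round-trip of the 4-byte packet header.
#include <stdint.h>
#include <assert.h>

static const uint32_t HEADER_SIZE = 4 ;

// Bytes 0-1: codec id (little endian). Byte 1 of the id doubles as the "unused" byte today.
// Bytes 2-3: codec-specific flags (little endian).
void packHeader(unsigned char *buf, uint16_t codec_id, uint16_t flags)
{
    buf[0] =  codec_id       & 0xff ;
    buf[1] = (codec_id >> 8) & 0xff ;
    buf[2] =  flags          & 0xff ;
    buf[3] = (flags    >> 8) & 0xff ;
}

void parseHeader(const unsigned char *buf, uint16_t& codec_id, uint16_t& flags)
{
    codec_id = buf[0] + (buf[1] << 8) ;
    flags    = buf[2] + (buf[3] << 8) ;
}

int main()
{
    unsigned char buf[HEADER_SIZE] ;

    // 0x0001 = VIDEO_PROCESSOR_CODEC_ID_JPEG_VIDEO, 0x0001 = JPEG_VIDEO_FLAGS_DIFFERENTIAL_FRAME
    packHeader(buf, 0x0001, 0x0001) ;

    uint16_t codec_id, flags ;
    parseHeader(buf, codec_id, flags) ;
    assert(codec_id == 0x0001 && flags == 0x0001) ;
    return 0 ;
}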
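
The VideoCodec interface is what makes the new VideoProcessor pluggable: a codec only implements encodeData()/decodeData() over RsVOIPDataChunk, and VideoProcessor routes packets to it via the codec id in the header. As a sketch of what a third codec could look like, here is a hypothetical lossless "RawVideo" codec. The class name and the 0x0003 codec id are assumptions made for illustration, and the malloc()-based ownership of chunk.data only mirrors what the JPEG path appears to do; wiring it up would also require a new CodecId enum value plus a branch in the two switch() statements in VideoProcessor.cpp.

// Illustrative sketch, not part of the patch: a hypothetical extra codec behind VideoCodec.
#include <cstdlib>
#include <cstring>
#include <QBuffer>
#include <QByteArray>
#include <QImage>
#include "gui/VideoProcessor.h"    // VideoCodec and, indirectly, RsVOIPDataChunk

class RawVideo: public VideoCodec    // hypothetical codec: plain PNG frames, no reference frame
{
protected:
    static const uint32_t HEADER_SIZE  = 0x04 ;
    static const uint16_t RAW_CODEC_ID = 0x0003 ;    // hypothetical; would become a CodecId entry

    virtual bool encodeData(const QImage& image, uint32_t /*size_hint*/, RsVOIPDataChunk& chunk)
    {
        QByteArray qb ;
        QBuffer buffer(&qb) ;
        buffer.open(QIODevice::WriteOnly) ;
        image.save(&buffer, "PNG") ;    // lossless, so no differential flag is needed

        // Ownership assumption: the codec allocates, the consumer of the packet frees.
        chunk.data = malloc(HEADER_SIZE + qb.size()) ;

        ((unsigned char *)chunk.data)[0] =  RAW_CODEC_ID       & 0xff ;
        ((unsigned char *)chunk.data)[1] = (RAW_CODEC_ID >> 8) & 0xff ;
        ((unsigned char *)chunk.data)[2] = 0 ;    // no codec-specific flags
        ((unsigned char *)chunk.data)[3] = 0 ;

        memcpy((unsigned char *)chunk.data + HEADER_SIZE, qb.data(), qb.size()) ;

        chunk.size = HEADER_SIZE + qb.size() ;
        chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;
        return true ;
    }

    virtual bool decodeData(const RsVOIPDataChunk& chunk, QImage& image)
    {
        if(chunk.size <= HEADER_SIZE)
            return false ;

        QByteArray qb((char *)chunk.data + HEADER_SIZE, (int)chunk.size - HEADER_SIZE) ;
        return image.loadFromData(qb, "PNG") ;
    }
};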