Started implementation of Video Chat (not working yet!).

- GUI part is done
- implemented a very basic JPEG codec
- added an echo video frame to the configuration panel
- created a video capture object that uses OpenCV (should be cross-platform)
Still to do:
- serialise and send frames through p3VoRS (see the sketch after this list)
- use a serious codec (e.g. Theora or x264)
- add icons to reflect camera state (failure/working/sending/...)
- compilation on Windows
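
A rough sketch of the missing serialisation step, for reference only: it assumes the JPEG-encoded frame can travel over the same RsVoipDataChunk/sendVoipData() path that sendAudioData() already uses; the sendEncodedFrame() helper and the peer id type are assumptions, and a dedicated video item in p3VoRS would likely replace this.

#include <string>
#include <QBuffer>
#include <QImage>
#include "interface/rsvoip.h"

// Hypothetical helper, not part of this commit: serialises one frame as JPEG
// and ships it through the existing VOIP data channel, like sendAudioData() does.
void sendEncodedFrame(const QImage& frame, const std::string& peer_id)
{
    QByteArray encoded ;
    QBuffer buffer(&encoded) ;
    buffer.open(QIODevice::WriteOnly) ;
    frame.save(&buffer, "JPEG") ;             // same serialisation as JPEGVideoEncoder

    RsVoipDataChunk chunk ;
    chunk.size = encoded.size() ;
    chunk.data = (void*)encoded.constData() ; // the chunk only borrows the buffer

    rsVoip->sendVoipData(peer_id, chunk) ;    // assumes the audio channel can carry video
}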



git-svn-id: http://svn.code.sf.net/p/retroshare/code/trunk@7449 b45a01b8-16f6-495d-af2f-9b41ad6348cc
csoler 2014-07-13 13:57:25 +00:00
parent 3b88acb45d
commit b6089f3b91
18 changed files with 873 additions and 420 deletions

View File

@ -14,6 +14,7 @@ CONFIG += qt uic qrc resources
MOBILITY = multimedia
INCLUDEPATH += ../../retroshare-gui/src/temp/ui ../../libretroshare/src
INCLUDEPATH += /usr/include/opencv
#################################### Windows #####################################
@ -32,9 +33,11 @@ SOURCES = services/p3vors.cc \
gui/SpeexProcessor.cpp \
gui/audiodevicehelper.cpp \
gui/VoipStatistics.cpp \
gui/AudioChatWidgetHolder.cpp \
gui/VOIPChatWidgetHolder.cpp \
gui/PluginGUIHandler.cpp \
gui/PluginNotifier.cpp \
gui/VideoProcessor.cpp \
gui/QVideoDevice.cpp \
VOIPPlugin.cpp
HEADERS = services/p3vors.h \
@ -45,9 +48,11 @@ HEADERS = services/p3vors.h \
gui/SpeexProcessor.h \
gui/audiodevicehelper.h \
gui/VoipStatistics.h \
gui/AudioChatWidgetHolder.h \
gui/VOIPChatWidgetHolder.h \
gui/PluginGUIHandler.h \
gui/PluginNotifier.h \
gui/VideoProcessor.h \
gui/QVideoDevice.h \
interface/rsvoip.h \
VOIPPlugin.h
@ -81,4 +86,4 @@ TRANSLATIONS += \
lang/VOIP_tr.ts \
lang/VOIP_zh_CN.ts
LIBS += -lspeex -lspeexdsp
LIBS += -lspeex -lspeexdsp -lopencv_core -lopencv_highgui

View File

@ -12,7 +12,7 @@
#include "gui/VoipStatistics.h"
#include "gui/AudioInputConfig.h"
#include "gui/AudioChatWidgetHolder.h"
#include "gui/VOIPChatWidgetHolder.h"
#include "gui/PluginGUIHandler.h"
#include "gui/PluginNotifier.h"
#include "gui/SoundManager.h"
@ -80,6 +80,9 @@ void VOIPPlugin::setInterfaces(RsPlugInInterfaces &interfaces)
ConfigPage *VOIPPlugin::qt_config_page() const
{
// The config pages are deleted when the config dialog is closed, so the created
// object must not be static.
//
return new AudioInputConfig() ;
}
@ -111,7 +114,7 @@ ChatWidgetHolder *VOIPPlugin::qt_get_chat_widget_holder(ChatWidget *chatWidget)
{
switch (chatWidget->chatType()) {
case ChatWidget::CHATTYPE_PRIVATE:
return new AudioChatWidgetHolder(chatWidget);
return new VOIPChatWidgetHolder(chatWidget);
case ChatWidget::CHATTYPE_UNKNOWN:
case ChatWidget::CHATTYPE_LOBBY:
case ChatWidget::CHATTYPE_DISTANT:

View File

@ -1,238 +0,0 @@
#include <QToolButton>
#include <QPropertyAnimation>
#include <QIcon>
#include "AudioChatWidgetHolder.h"
#include <gui/audiodevicehelper.h>
#include "interface/rsvoip.h"
#include "gui/SoundManager.h"
#include "util/HandleRichText.h"
#include "gui/common/StatusDefs.h"
#include "gui/chat/ChatWidget.h"
#include <retroshare/rsstatus.h>
#define CALL_START ":/images/call-start-22.png"
#define CALL_STOP ":/images/call-stop-22.png"
#define CALL_HOLD ":/images/call-hold-22.png"
AudioChatWidgetHolder::AudioChatWidgetHolder(ChatWidget *chatWidget)
: QObject(), ChatWidgetHolder(chatWidget)
{
audioListenToggleButton = new QToolButton ;
audioListenToggleButton->setMinimumSize(QSize(28,28)) ;
audioListenToggleButton->setMaximumSize(QSize(28,28)) ;
audioListenToggleButton->setText(QString()) ;
audioListenToggleButton->setToolTip(tr("Mute yourself"));
std::cerr << "****** VOIPLugin: Creating new AudioChatWidgetHolder !!" << std::endl;
QIcon icon ;
icon.addPixmap(QPixmap(":/images/audio-volume-muted-22.png")) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Normal,QIcon::On) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Disabled,QIcon::On) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Active,QIcon::On) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Selected,QIcon::On) ;
audioListenToggleButton->setIcon(icon) ;
audioListenToggleButton->setIconSize(QSize(22,22)) ;
audioListenToggleButton->setAutoRaise(true) ;
audioListenToggleButton->setCheckable(true);
audioMuteCaptureToggleButton = new QToolButton ;
audioMuteCaptureToggleButton->setMinimumSize(QSize(28,28)) ;
audioMuteCaptureToggleButton->setMaximumSize(QSize(28,28)) ;
audioMuteCaptureToggleButton->setText(QString()) ;
audioMuteCaptureToggleButton->setToolTip(tr("Start Call"));
QIcon icon2 ;
icon2.addPixmap(QPixmap(":/images/call-start-22.png")) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Normal,QIcon::On) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Disabled,QIcon::On) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Active,QIcon::On) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Selected,QIcon::On) ;
audioMuteCaptureToggleButton->setIcon(icon2) ;
audioMuteCaptureToggleButton->setIconSize(QSize(22,22)) ;
audioMuteCaptureToggleButton->setAutoRaise(true) ;
audioMuteCaptureToggleButton->setCheckable(true) ;
hangupButton = new QToolButton ;
hangupButton->setIcon(QIcon(":/images/call-stop-22.png")) ;
hangupButton->setIconSize(QSize(22,22)) ;
hangupButton->setMinimumSize(QSize(28,28)) ;
hangupButton->setMaximumSize(QSize(28,28)) ;
hangupButton->setCheckable(false) ;
hangupButton->setAutoRaise(true) ;
hangupButton->setText(QString()) ;
hangupButton->setToolTip(tr("Hangup Call"));
connect(audioListenToggleButton, SIGNAL(clicked()), this , SLOT(toggleAudioListen()));
connect(audioMuteCaptureToggleButton, SIGNAL(clicked()), this , SLOT(toggleAudioMuteCapture()));
connect(hangupButton, SIGNAL(clicked()), this , SLOT(hangupCall()));
mChatWidget->addChatBarWidget(audioListenToggleButton) ;
mChatWidget->addChatBarWidget(audioMuteCaptureToggleButton) ;
mChatWidget->addChatBarWidget(hangupButton) ;
outputProcessor = NULL ;
outputDevice = NULL ;
inputProcessor = NULL ;
inputDevice = NULL ;
}
AudioChatWidgetHolder::~AudioChatWidgetHolder()
{
if(inputDevice != NULL)
inputDevice->stop() ;
}
void AudioChatWidgetHolder::toggleAudioListen()
{
std::cerr << "******** VOIPLugin: Toggling audio listen!" << std::endl;
if (audioListenToggleButton->isChecked()) {
audioListenToggleButton->setToolTip(tr("Mute yourself"));
} else {
audioListenToggleButton->setToolTip(tr("Unmute yourself"));
//audioListenToggleButton->setChecked(false);
/*if (outputDevice) {
outputDevice->stop();
}*/
}
}
void AudioChatWidgetHolder::hangupCall()
{
std::cerr << "******** VOIPLugin: Hangup call!" << std::endl;
disconnect(inputProcessor, SIGNAL(networkPacketReady()), this, SLOT(sendAudioData()));
if (inputDevice) {
inputDevice->stop();
}
if (outputDevice) {
outputDevice->stop();
}
audioListenToggleButton->setChecked(false);
audioMuteCaptureToggleButton->setChecked(false);
}
void AudioChatWidgetHolder::toggleAudioMuteCapture()
{
std::cerr << "******** VOIPLugin: Toggling audio mute capture!" << std::endl;
if (audioMuteCaptureToggleButton->isChecked()) {
//activate audio output
audioListenToggleButton->setChecked(true);
audioMuteCaptureToggleButton->setToolTip(tr("Hold Call"));
//activate audio input
if (!inputProcessor) {
inputProcessor = new QtSpeex::SpeexInputProcessor();
if (outputProcessor) {
connect(outputProcessor, SIGNAL(playingFrame(QByteArray*)), inputProcessor, SLOT(addEchoFrame(QByteArray*)));
}
inputProcessor->open(QIODevice::WriteOnly | QIODevice::Unbuffered);
}
if (!inputDevice) {
inputDevice = AudioDeviceHelper::getPreferedInputDevice();
}
connect(inputProcessor, SIGNAL(networkPacketReady()), this, SLOT(sendAudioData()));
inputDevice->start(inputProcessor);
if (mChatWidget) {
mChatWidget->addChatMsg(true, tr("VoIP Status"), QDateTime::currentDateTime(), QDateTime::currentDateTime(), tr("Outgoing Call is started..."), ChatWidget::MSGTYPE_SYSTEM);
}
} else {
disconnect(inputProcessor, SIGNAL(networkPacketReady()), this, SLOT(sendAudioData()));
if (inputDevice) {
inputDevice->stop();
}
audioMuteCaptureToggleButton->setToolTip(tr("Resume Call"));
}
}
void AudioChatWidgetHolder::addAudioData(const QString name, QByteArray* array)
{
if (!audioMuteCaptureToggleButton->isChecked()) {
//launch an animation. Don't launch it if already animating
if (!audioMuteCaptureToggleButton->graphicsEffect() ||
(audioMuteCaptureToggleButton->graphicsEffect()->inherits("QGraphicsOpacityEffect") &&
((QGraphicsOpacityEffect*)audioMuteCaptureToggleButton->graphicsEffect())->opacity() == 1)
) {
QGraphicsOpacityEffect *effect = new QGraphicsOpacityEffect(audioListenToggleButton);
audioMuteCaptureToggleButton->setGraphicsEffect(effect);
QPropertyAnimation *anim = new QPropertyAnimation(effect, "opacity");
anim->setStartValue(1);
anim->setKeyValueAt(0.5,0);
anim->setEndValue(1);
anim->setDuration(400);
anim->start();
}
// soundManager->play(VOIP_SOUND_INCOMING_CALL);
audioMuteCaptureToggleButton->setToolTip(tr("Answer"));
//TODO make a toaster and a sound for the incoming call
return;
}
if (!outputDevice) {
outputDevice = AudioDeviceHelper::getDefaultOutputDevice();
}
if (!outputProcessor) {
//start output audio device
outputProcessor = new QtSpeex::SpeexOutputProcessor();
if (inputProcessor) {
connect(outputProcessor, SIGNAL(playingFrame(QByteArray*)), inputProcessor, SLOT(addEchoFrame(QByteArray*)));
}
outputProcessor->open(QIODevice::ReadOnly | QIODevice::Unbuffered);
outputDevice->start(outputProcessor);
}
if (outputDevice && outputDevice->error() != QAudio::NoError) {
std::cerr << "Restarting output device. Error before reset " << outputDevice->error() << " buffer size : " << outputDevice->bufferSize() << std::endl;
outputDevice->stop();
outputDevice->reset();
if (outputDevice->error() == QAudio::UnderrunError)
outputDevice->setBufferSize(20);
outputDevice->start(outputProcessor);
}
outputProcessor->putNetworkPacket(name, *array);
//check the input device for errors
if (inputDevice && inputDevice->error() != QAudio::NoError) {
std::cerr << "Restarting input device. Error before reset " << inputDevice->error() << std::endl;
inputDevice->stop();
inputDevice->reset();
inputDevice->start(inputProcessor);
}
}
void AudioChatWidgetHolder::sendAudioData()
{
while(inputProcessor && inputProcessor->hasPendingPackets()) {
QByteArray qbarray = inputProcessor->getNetworkPacket();
RsVoipDataChunk chunk;
chunk.size = qbarray.size();
chunk.data = (void*)qbarray.constData();
rsVoip->sendVoipData(mChatWidget->getPeerId(),chunk);
}
}
void AudioChatWidgetHolder::updateStatus(int status)
{
audioListenToggleButton->setEnabled(true);
audioMuteCaptureToggleButton->setEnabled(true);
hangupButton->setEnabled(true);
switch (status) {
case RS_STATUS_OFFLINE:
audioListenToggleButton->setEnabled(false);
audioMuteCaptureToggleButton->setEnabled(false);
hangupButton->setEnabled(false);
break;
}
}

View File

@ -1,41 +0,0 @@
#include <QObject>
#include <QGraphicsEffect>
#include <gui/SpeexProcessor.h>
#include <gui/chat/ChatWidget.h>
class QToolButton;
class QAudioInput;
class QAudioOutput;
#define VOIP_SOUND_INCOMING_CALL "VOIP_incoming_call"
class AudioChatWidgetHolder : public QObject, public ChatWidgetHolder
{
Q_OBJECT
public:
AudioChatWidgetHolder(ChatWidget *chatWidget);
virtual ~AudioChatWidgetHolder();
virtual void updateStatus(int status);
void addAudioData(const QString name, QByteArray* array) ;
private slots:
void toggleAudioListen();
void toggleAudioMuteCapture();
void hangupCall() ;
public slots:
void sendAudioData();
protected:
QAudioInput* inputDevice;
QAudioOutput* outputDevice;
QtSpeex::SpeexInputProcessor* inputProcessor;
QtSpeex::SpeexOutputProcessor* outputProcessor;
QToolButton *audioListenToggleButton ;
QToolButton *audioMuteCaptureToggleButton ;
QToolButton *hangupButton ;
};

View File

@ -55,28 +55,43 @@ void AudioInputDialog::showEvent(QShowEvent *) {
AudioInputConfig::AudioInputConfig(QWidget * parent, Qt::WindowFlags flags)
: ConfigPage(parent, flags)
{
std::cerr << "Creating audioInputConfig object" << std::endl;
/* Invoke the Qt Designer generated object setup routine */
ui.setupUi(this);
loaded = false;
inputProcessor = NULL;
inputDevice = NULL;
inputAudioProcessor = NULL;
inputAudioDevice = NULL;
abSpeech = NULL;
// Create the video pipeline.
//
videoInput = new QVideoInputDevice(this) ;
videoInput->setEchoVideoTarget(ui.videoDisplay) ;
videoInput->setVideoEncoder(NULL) ;
}
AudioInputConfig::~AudioInputConfig()
{
if (inputDevice) {
inputDevice->stop();
delete inputDevice ;
inputDevice = NULL ;
std::cerr << "Deleting audioInputConfig object" << std::endl;
if(videoInput != NULL)
{
videoInput->stop() ;
delete videoInput ;
}
if (inputAudioDevice) {
inputAudioDevice->stop();
delete inputAudioDevice ;
inputAudioDevice = NULL ;
}
if(inputProcessor)
if(inputAudioProcessor)
{
delete inputProcessor ;
inputProcessor = NULL ;
delete inputAudioProcessor ;
inputAudioProcessor = NULL ;
}
}
@ -168,6 +183,9 @@ void AudioInputConfig::loadSettings() {
connect( ui.qsAmp, SIGNAL( valueChanged ( int ) ), this, SLOT( on_qsAmp_valueChanged(int) ) );
connect( ui.qcbTransmit, SIGNAL( currentIndexChanged ( int ) ), this, SLOT( on_qcbTransmit_currentIndexChanged(int) ) );
loaded = true;
std::cerr << "AudioInputConfig:: starting video." << std::endl;
videoInput->start() ;
}
bool AudioInputConfig::save(QString &/*errmsg*/) {//mainly useless because saving occurs in real time
@ -248,15 +266,15 @@ void AudioInputConfig::on_qcbTransmit_currentIndexChanged(int v) {
void AudioInputConfig::on_Tick_timeout() {
if (!inputProcessor) {
inputProcessor = new QtSpeex::SpeexInputProcessor();
inputProcessor->open(QIODevice::WriteOnly | QIODevice::Unbuffered);
if (!inputAudioProcessor) {
inputAudioProcessor = new QtSpeex::SpeexInputProcessor();
inputAudioProcessor->open(QIODevice::WriteOnly | QIODevice::Unbuffered);
if (!inputDevice) {
inputDevice = AudioDeviceHelper::getPreferedInputDevice();
if (!inputAudioDevice) {
inputAudioDevice = AudioDeviceHelper::getPreferedInputDevice();
}
inputDevice->start(inputProcessor);
connect(inputProcessor, SIGNAL(networkPacketReady()), this, SLOT(emptyBuffer()));
inputAudioDevice->start(inputAudioProcessor);
connect(inputAudioProcessor, SIGNAL(networkPacketReady()), this, SLOT(emptyBuffer()));
}
abSpeech->iBelow = ui.qsTransmitMin->value();
@ -266,14 +284,14 @@ void AudioInputConfig::on_Tick_timeout() {
rsVoip->setVoipfVADmax(ui.qsTransmitMax->value());
}
abSpeech->iValue = iroundf(inputProcessor->dVoiceAcivityLevel * 32767.0f + 0.5f);
abSpeech->iValue = iroundf(inputAudioProcessor->dVoiceAcivityLevel * 32767.0f + 0.5f);
abSpeech->update();
}
void AudioInputConfig::emptyBuffer() {
while(inputProcessor->hasPendingPackets()) {
inputProcessor->getNetworkPacket(); //that will purge the buffer
while(inputAudioProcessor->hasPendingPackets()) {
inputAudioProcessor->getNetworkPacket(); //that will purge the buffer
}
}

View File

@ -38,6 +38,7 @@
#include "ui_AudioInputConfig.h"
#include "SpeexProcessor.h"
#include "VideoProcessor.h"
#include "AudioStats.h"
class AudioInputConfig : public ConfigPage
@ -46,9 +47,12 @@ class AudioInputConfig : public ConfigPage
private:
Ui::AudioInput ui;
QAudioInput* inputDevice;
QtSpeex::SpeexInputProcessor* inputProcessor;
QAudioInput* inputAudioDevice;
QtSpeex::SpeexInputProcessor* inputAudioProcessor;
AudioBar* abSpeech;
//VideoDecoder *videoDecoder ;
//VideoEncoder *videoEncoder ;
QVideoInputDevice *videoInput ;
bool loaded;

View File

@ -6,11 +6,11 @@
<rect>
<x>0</x>
<y>0</y>
<width>508</width>
<height>378</height>
<width>501</width>
<height>406</height>
</rect>
</property>
<layout class="QVBoxLayout" name="qwVadLayout">
<layout class="QVBoxLayout" name="verticalLayout">
<item>
<widget class="QPushButton" name="qpbAudioWizard">
<property name="text">
@ -235,116 +235,145 @@
</widget>
</item>
<item>
<widget class="QGroupBox" name="qgbAudio">
<property name="title">
<string>Audio Processing</string>
</property>
<layout class="QGridLayout">
<item row="0" column="0">
<widget class="QLabel" name="qliNoise">
<property name="text">
<string>Noise Suppression</string>
</property>
<property name="buddy">
<cstring>qsNoise</cstring>
</property>
</widget>
</item>
<item row="0" column="1">
<widget class="QSlider" name="qsNoise">
<property name="enabled">
<bool>true</bool>
</property>
<property name="toolTip">
<string>Noise suppression</string>
</property>
<property name="whatsThis">
<string>&lt;b&gt;This sets the amount of noise suppression to apply.&lt;/b&gt;&lt;br /&gt;The higher this value, the more aggressively stationary noise will be suppressed.</string>
</property>
<property name="minimum">
<number>14</number>
</property>
<property name="maximum">
<number>60</number>
</property>
<property name="pageStep">
<number>5</number>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
<item row="0" column="2">
<widget class="QLabel" name="qlNoise">
<property name="minimumSize">
<size>
<width>30</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QLabel" name="qliAmp">
<property name="text">
<string>Amplification</string>
</property>
<property name="buddy">
<cstring>qsAmp</cstring>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QSlider" name="qsAmp">
<property name="toolTip">
<string>Maximum amplification of input sound</string>
</property>
<property name="whatsThis">
<string>&lt;b&gt;Maximum amplification of input.&lt;/b&gt;&lt;br /&gt;RetroShare normalizes the input volume before compressing, and this sets how much it's allowed to amplify.&lt;br /&gt;The actual level is continually updated based on your current speech pattern, but it will never go above the level specified here.&lt;br /&gt;If the &lt;i&gt;Microphone loudness&lt;/i&gt; level of the audio statistics hover around 100%, you probably want to set this to 2.0 or so, but if, like most people, you are unable to reach 100%, set this to something much higher.&lt;br /&gt;Ideally, set it so &lt;i&gt;Microphone Loudness * Amplification Factor &gt;= 100&lt;/i&gt;, even when you're speaking really soft.&lt;br /&gt;&lt;br /&gt;Note that there is no harm in setting this to maximum, but RetroShare will start picking up other conversations if you leave it to auto-tune to that level.</string>
</property>
<property name="maximum">
<number>19500</number>
</property>
<property name="singleStep">
<number>500</number>
</property>
<property name="pageStep">
<number>2000</number>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
<item row="1" column="2">
<widget class="QLabel" name="qlAmp">
<property name="minimumSize">
<size>
<width>30</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QCheckBox" name="qcbEchoCancel">
<property name="text">
<string>Echo Cancellation Processing</string>
</property>
<property name="tristate">
<bool>false</bool>
</property>
</widget>
</item>
</layout>
</widget>
<layout class="QHBoxLayout" name="horizontalLayout">
<item>
<widget class="QGroupBox" name="qgbAudio">
<property name="title">
<string>Audio Processing</string>
</property>
<layout class="QGridLayout">
<item row="0" column="0">
<widget class="QLabel" name="qliNoise">
<property name="text">
<string>Noise Suppression</string>
</property>
<property name="buddy">
<cstring>qsNoise</cstring>
</property>
</widget>
</item>
<item row="0" column="1">
<widget class="QSlider" name="qsNoise">
<property name="enabled">
<bool>true</bool>
</property>
<property name="toolTip">
<string>Noise suppression</string>
</property>
<property name="whatsThis">
<string>&lt;b&gt;This sets the amount of noise suppression to apply.&lt;/b&gt;&lt;br /&gt;The higher this value, the more aggressively stationary noise will be suppressed.</string>
</property>
<property name="minimum">
<number>14</number>
</property>
<property name="maximum">
<number>60</number>
</property>
<property name="pageStep">
<number>5</number>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
<item row="0" column="2">
<widget class="QLabel" name="qlNoise">
<property name="minimumSize">
<size>
<width>30</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QLabel" name="qliAmp">
<property name="text">
<string>Amplification</string>
</property>
<property name="buddy">
<cstring>qsAmp</cstring>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QSlider" name="qsAmp">
<property name="toolTip">
<string>Maximum amplification of input sound</string>
</property>
<property name="whatsThis">
<string>&lt;b&gt;Maximum amplification of input.&lt;/b&gt;&lt;br /&gt;RetroShare normalizes the input volume before compressing, and this sets how much it's allowed to amplify.&lt;br /&gt;The actual level is continually updated based on your current speech pattern, but it will never go above the level specified here.&lt;br /&gt;If the &lt;i&gt;Microphone loudness&lt;/i&gt; level of the audio statistics hover around 100%, you probably want to set this to 2.0 or so, but if, like most people, you are unable to reach 100%, set this to something much higher.&lt;br /&gt;Ideally, set it so &lt;i&gt;Microphone Loudness * Amplification Factor &gt;= 100&lt;/i&gt;, even when you're speaking really soft.&lt;br /&gt;&lt;br /&gt;Note that there is no harm in setting this to maximum, but RetroShare will start picking up other conversations if you leave it to auto-tune to that level.</string>
</property>
<property name="maximum">
<number>19500</number>
</property>
<property name="singleStep">
<number>500</number>
</property>
<property name="pageStep">
<number>2000</number>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
<item row="1" column="2">
<widget class="QLabel" name="qlAmp">
<property name="minimumSize">
<size>
<width>30</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QCheckBox" name="qcbEchoCancel">
<property name="text">
<string>Echo Cancellation Processing</string>
</property>
<property name="tristate">
<bool>false</bool>
</property>
</widget>
</item>
</layout>
</widget>
</item>
<item>
<widget class="QGroupBox" name="groupBox">
<property name="title">
<string>Video Processing</string>
</property>
<layout class="QVBoxLayout" name="verticalLayout_2">
<item>
<widget class="QVideoOutputDevice" name="videoDisplay">
<property name="minimumSize">
<size>
<width>170</width>
<height>128</height>
</size>
</property>
<property name="frameShape">
<enum>QFrame::StyledPanel</enum>
</property>
<property name="frameShadow">
<enum>QFrame::Raised</enum>
</property>
</widget>
</item>
</layout>
</widget>
</item>
</layout>
</item>
<item>
<spacer>
@ -361,6 +390,14 @@
</item>
</layout>
</widget>
<customwidgets>
<customwidget>
<class>QVideoOutputDevice</class>
<extends>QFrame</extends>
<header>gui/QVideoDevice.h</header>
<container>1</container>
</customwidget>
</customwidgets>
<tabstops>
<tabstop>qcbTransmit</tabstop>
<tabstop>qsDoublePush</tabstop>

View File

@ -4,7 +4,7 @@
#include <interface/rsvoip.h>
#include "PluginGUIHandler.h"
#include <gui/chat/ChatDialog.h>
#include <gui/AudioChatWidgetHolder.h>
#include <gui/VOIPChatWidgetHolder.h>
#include "gui/chat/ChatWidget.h"
void PluginGUIHandler::ReceivedInvitation(const QString& /*peer_id*/)
@ -42,7 +42,7 @@ void PluginGUIHandler::ReceivedVoipData(const QString& qpeer_id)
const QList<ChatWidgetHolder*> &chatWidgetHolderList = cw->chatWidgetHolderList();
foreach (ChatWidgetHolder *chatWidgetHolder, chatWidgetHolderList) {
AudioChatWidgetHolder *acwh = dynamic_cast<AudioChatWidgetHolder*>(chatWidgetHolder) ;
VOIPChatWidgetHolder *acwh = dynamic_cast<VOIPChatWidgetHolder*>(chatWidgetHolder) ;
if (acwh) {
for (unsigned int i = 0; i < chunks.size(); ++i) {

View File

@ -0,0 +1,102 @@
#include <cv.h>
#include <highgui.h>
#include <QTimer>
#include <QPainter>
#include "QVideoDevice.h"
#include "VideoProcessor.h"
QVideoInputDevice::QVideoInputDevice(QWidget *parent)
{
_timer = NULL ;
_capture_device = NULL ;
_video_encoder = NULL ;
_echo_output_device = NULL ;
}
void QVideoInputDevice::stop()
{
if(_timer != NULL)
{
QObject::disconnect(_timer,SIGNAL(timeout()),this,SLOT(grabFrame())) ;
_timer->stop() ;
delete _timer ;
_timer = NULL ;
}
if(_capture_device != NULL)
{
cvReleaseCapture(&_capture_device) ;
_capture_device = NULL ;
}
}
void QVideoInputDevice::start()
{
// make sure everything is re-initialised
//
stop() ;
// Initialise the capture device
static const int cam_id = 0 ;
_capture_device = cvCaptureFromCAM(cam_id);
if(_capture_device == NULL)
{
std::cerr << "Cannot initialise camera. Something's wrong." << std::endl;
return ;
}
_timer = new QTimer ;
QObject::connect(_timer,SIGNAL(timeout()),this,SLOT(grabFrame())) ;
_timer->start(50) ; // fires every 50 ms, i.e. 20 images per second.
}
void QVideoInputDevice::grabFrame()
{
IplImage *img=cvQueryFrame(_capture_device);
if(img == NULL)
{
std::cerr << "(EE) Cannot capture image from camera. Something's wrong." << std::endl;
return ;
}
// get the image data
if(img->nChannels != 3)
{
std::cerr << "(EE) expected 3 channels. Got " << img->nChannels << std::endl;
cvReleaseImage(&img) ;
return ;
}
static const int _encoded_width = 128 ;
static const int _encoded_height = 128 ;
QImage image = QImage((uchar*)img->imageData,img->width,img->height,QImage::Format_RGB888).scaled(QSize(_encoded_width,_encoded_height),Qt::IgnoreAspectRatio,Qt::SmoothTransformation) ;
if(_video_encoder != NULL) _video_encoder->addImage(image) ;
if(_echo_output_device != NULL) _echo_output_device->showFrame(image) ;
}
QVideoInputDevice::~QVideoInputDevice()
{
stop() ;
}
QVideoOutputDevice::QVideoOutputDevice(QWidget *parent)
: QLabel(parent)
{
setPixmap(QPixmap(":/images/video-icon-big.png").scaled(170,128,Qt::KeepAspectRatio,Qt::SmoothTransformation)) ;
}
void QVideoOutputDevice::showFrame(const QImage& img)
{
//std::cerr << "Displaying frame!!" << std::endl;
//QPainter painter(this) ;
//painter.drawImage(QPointF(0,0),img) ;
setPixmap(QPixmap::fromImage(img).scaled(minimumSize(),Qt::IgnoreAspectRatio,Qt::SmoothTransformation)) ;
}

View File

@ -0,0 +1,52 @@
#pragma once
#include <QLabel>
class VideoEncoder ;
class CvCapture ;
// Responsible for displaying the video. The source of the video is
// a VideoDecoder object, which uses a codec.
//
class QVideoOutputDevice: public QLabel
{
public:
QVideoOutputDevice(QWidget *parent) ;
void showFrame(const QImage&) ;
};
// Responsible for grabbing the video from the webcam and sending it to the
// VideoEncoder object.
//
class QVideoInputDevice: public QObject
{
Q_OBJECT
public:
QVideoInputDevice(QWidget *parent) ;
~QVideoInputDevice() ;
// Captured images are sent to this encoder. Can be NULL.
//
void setVideoEncoder(VideoEncoder *venc) { _video_encoder = venc ; }
// All images received will be echoed to this target. We could use signal/slots, but it's
// probably faster this way. Can be NULL.
//
void setEchoVideoTarget(QVideoOutputDevice *odev) { _echo_output_device = odev ; }
void start() ;
void stop() ;
protected slots:
void grabFrame() ;
private:
VideoEncoder *_video_encoder ;
QTimer *_timer ;
CvCapture *_capture_device ;
QVideoOutputDevice *_echo_output_device ;
};
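
For reference, a minimal wiring sketch of the two classes above, mirroring what AudioInputConfig does for its echo preview; the parent widget, layout placement and cleanup are left out, and the setupVideoCapture() function name is just for illustration.

#include <QWidget>
#include "QVideoDevice.h"
#include "VideoProcessor.h"

// Wiring sketch: preview the camera locally and feed captured frames to the
// JPEG encoder. The preview widget still has to be placed in a layout.
void setupVideoCapture(QWidget *parent)
{
    QVideoOutputDevice *preview = new QVideoOutputDevice(parent) ;
    VideoEncoder *encoder = new JPEGVideoEncoder ;

    QVideoInputDevice *camera = new QVideoInputDevice(parent) ;
    camera->setEchoVideoTarget(preview) ;   // captured frames are echoed to this widget
    camera->setVideoEncoder(encoder) ;      // ...and handed to the codec (may be NULL)
    camera->start() ;                       // opens camera 0 and starts the 50 ms grab timer

    // camera->stop() releases the OpenCV capture device and the timer.
}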

View File

@ -0,0 +1,307 @@
#include <QToolButton>
#include <QPropertyAnimation>
#include <QIcon>
#include <QLayout>
#include <gui/audiodevicehelper.h>
#include "interface/rsvoip.h"
#include "gui/SoundManager.h"
#include "util/HandleRichText.h"
#include "gui/common/StatusDefs.h"
#include "gui/chat/ChatWidget.h"
#include "VOIPChatWidgetHolder.h"
#include "VideoProcessor.h"
#include "QVideoDevice.h"
#include <retroshare/rsstatus.h>
#define CALL_START ":/images/call-start-22.png"
#define CALL_STOP ":/images/call-stop-22.png"
#define CALL_HOLD ":/images/call-hold-22.png"
VOIPChatWidgetHolder::VOIPChatWidgetHolder(ChatWidget *chatWidget)
: QObject(), ChatWidgetHolder(chatWidget)
{
std::cerr << "****** VOIPLugin: Creating new VOIPChatWidgetHolder !!" << std::endl;
QIcon icon ;
icon.addPixmap(QPixmap(":/images/audio-volume-muted-22.png")) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Normal,QIcon::On) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Disabled,QIcon::On) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Active,QIcon::On) ;
icon.addPixmap(QPixmap(":/images/audio-volume-medium-22.png"),QIcon::Selected,QIcon::On) ;
audioListenToggleButton = new QToolButton ;
audioListenToggleButton->setIcon(icon) ;
audioListenToggleButton->setIconSize(QSize(22,22)) ;
audioListenToggleButton->setAutoRaise(true) ;
audioListenToggleButton->setCheckable(true);
audioListenToggleButton->setMinimumSize(QSize(28,28)) ;
audioListenToggleButton->setMaximumSize(QSize(28,28)) ;
audioListenToggleButton->setText(QString()) ;
audioListenToggleButton->setToolTip(tr("Mute"));
QIcon icon2 ;
icon2.addPixmap(QPixmap(":/images/call-start-22.png")) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Normal,QIcon::On) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Disabled,QIcon::On) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Active,QIcon::On) ;
icon2.addPixmap(QPixmap(":/images/call-hold-22.png"),QIcon::Selected,QIcon::On) ;
audioCaptureToggleButton = new QToolButton ;
audioCaptureToggleButton->setMinimumSize(QSize(28,28)) ;
audioCaptureToggleButton->setMaximumSize(QSize(28,28)) ;
audioCaptureToggleButton->setText(QString()) ;
audioCaptureToggleButton->setToolTip(tr("Start Call"));
audioCaptureToggleButton->setIcon(icon2) ;
audioCaptureToggleButton->setIconSize(QSize(22,22)) ;
audioCaptureToggleButton->setAutoRaise(true) ;
audioCaptureToggleButton->setCheckable(true) ;
QIcon icon3 ;
icon3.addPixmap(QPixmap(":/images/camera-on.png")) ;
icon3.addPixmap(QPixmap(":/images/camera-off.png"),QIcon::Normal,QIcon::On) ;
icon3.addPixmap(QPixmap(":/images/camera-off.png"),QIcon::Disabled,QIcon::On) ;
icon3.addPixmap(QPixmap(":/images/camera-off.png"),QIcon::Active,QIcon::On) ;
icon3.addPixmap(QPixmap(":/images/camera-off.png"),QIcon::Selected,QIcon::On) ;
videoCaptureToggleButton = new QToolButton ;
videoCaptureToggleButton->setMinimumSize(QSize(28,28)) ;
videoCaptureToggleButton->setMaximumSize(QSize(28,28)) ;
videoCaptureToggleButton->setText(QString()) ;
videoCaptureToggleButton->setToolTip(tr("Activate camera"));
videoCaptureToggleButton->setIcon(icon3) ;
videoCaptureToggleButton->setIconSize(QSize(22,22)) ;
videoCaptureToggleButton->setAutoRaise(true) ;
videoCaptureToggleButton->setCheckable(true) ;
hangupButton = new QToolButton ;
hangupButton->setIcon(QIcon(":/images/call-stop-22.png")) ;
hangupButton->setIconSize(QSize(22,22)) ;
hangupButton->setMinimumSize(QSize(28,28)) ;
hangupButton->setMaximumSize(QSize(28,28)) ;
hangupButton->setCheckable(false) ;
hangupButton->setAutoRaise(true) ;
hangupButton->setText(QString()) ;
hangupButton->setToolTip(tr("Hangup Call"));
connect(videoCaptureToggleButton, SIGNAL(clicked()), this , SLOT(toggleVideoCapture()));
connect(audioListenToggleButton, SIGNAL(clicked()), this , SLOT(toggleAudioListen()));
connect(audioCaptureToggleButton, SIGNAL(clicked()), this , SLOT(toggleAudioCapture()));
connect(hangupButton, SIGNAL(clicked()), this , SLOT(hangupCall()));
mChatWidget->addChatBarWidget(audioListenToggleButton) ;
mChatWidget->addChatBarWidget(audioCaptureToggleButton) ;
mChatWidget->addChatBarWidget(videoCaptureToggleButton) ;
mChatWidget->addChatBarWidget(hangupButton) ;
outputAudioProcessor = NULL ;
outputAudioDevice = NULL ;
inputAudioProcessor = NULL ;
inputAudioDevice = NULL ;
inputVideoDevice = new QVideoInputDevice(mChatWidget) ; // not started yet ;-)
inputVideoProcessor = new JPEGVideoEncoder ;
outputVideoProcessor = new JPEGVideoDecoder ;
// Make a widget with two video devices, one for echo, and one for the talking peer.
videoWidget = new QWidget(mChatWidget) ;
videoWidget->setLayout(new QHBoxLayout()) ;
videoWidget->layout()->addWidget(echoVideoDevice = new QVideoOutputDevice(videoWidget)) ;
videoWidget->layout()->addWidget(outputVideoDevice = new QVideoOutputDevice(videoWidget)) ;
echoVideoDevice->setMinimumSize(128,95) ;
outputVideoDevice->setMinimumSize(128,95) ;
mChatWidget->addChatHorizontalWidget(videoWidget) ;
inputVideoDevice->setEchoVideoTarget(echoVideoDevice) ;
outputVideoProcessor->setDisplayTarget(outputVideoDevice) ;
}
VOIPChatWidgetHolder::~VOIPChatWidgetHolder()
{
if(inputAudioDevice != NULL)
inputAudioDevice->stop() ;
delete inputVideoDevice ;
delete inputVideoProcessor ;
delete outputVideoProcessor ;
}
void VOIPChatWidgetHolder::toggleAudioListen()
{
std::cerr << "******** VOIPLugin: Toggling audio listen!" << std::endl;
if (audioListenToggleButton->isChecked()) {
audioListenToggleButton->setToolTip(tr("Mute yourself"));
} else {
audioListenToggleButton->setToolTip(tr("Unmute yourself"));
//audioListenToggleButton->setChecked(false);
/*if (outputAudioDevice) {
outputAudioDevice->stop();
}*/
}
}
void VOIPChatWidgetHolder::hangupCall()
{
std::cerr << "******** VOIPLugin: Hangup call!" << std::endl;
disconnect(inputAudioProcessor, SIGNAL(networkPacketReady()), this, SLOT(sendAudioData()));
if (inputAudioDevice) {
inputAudioDevice->stop();
}
if (outputAudioDevice) {
outputAudioDevice->stop();
}
audioListenToggleButton->setChecked(false);
audioCaptureToggleButton->setChecked(false);
}
void VOIPChatWidgetHolder::toggleAudioCapture()
{
std::cerr << "******** VOIPLugin: Toggling audio mute capture!" << std::endl;
if (audioCaptureToggleButton->isChecked()) {
//activate audio output
audioListenToggleButton->setChecked(true);
audioCaptureToggleButton->setToolTip(tr("Hold Call"));
//activate audio input
if (!inputAudioProcessor) {
inputAudioProcessor = new QtSpeex::SpeexInputProcessor();
if (outputAudioProcessor) {
connect(outputAudioProcessor, SIGNAL(playingFrame(QByteArray*)), inputAudioProcessor, SLOT(addEchoFrame(QByteArray*)));
}
inputAudioProcessor->open(QIODevice::WriteOnly | QIODevice::Unbuffered);
}
if (!inputAudioDevice) {
inputAudioDevice = AudioDeviceHelper::getPreferedInputDevice();
}
connect(inputAudioProcessor, SIGNAL(networkPacketReady()), this, SLOT(sendAudioData()));
inputAudioDevice->start(inputAudioProcessor);
if (mChatWidget) {
mChatWidget->addChatMsg(true, tr("VoIP Status"), QDateTime::currentDateTime(), QDateTime::currentDateTime(), tr("Outgoing Call is started..."), ChatWidget::MSGTYPE_SYSTEM);
}
} else {
disconnect(inputAudioProcessor, SIGNAL(networkPacketReady()), this, SLOT(sendAudioData()));
if (inputAudioDevice) {
inputAudioDevice->stop();
}
audioCaptureToggleButton->setToolTip(tr("Resume Call"));
}
}
void VOIPChatWidgetHolder::toggleVideoCapture()
{
std::cerr << "******** VOIPLugin: Toggling video capture!" << std::endl;
if (videoCaptureToggleButton->isChecked())
{
//activate video input
//
inputVideoDevice->start() ;
videoCaptureToggleButton->setToolTip(tr("Shut camera off"));
if (mChatWidget)
mChatWidget->addChatMsg(true, tr("VoIP Status"), QDateTime::currentDateTime(), QDateTime::currentDateTime(), tr("you're now sending video..."), ChatWidget::MSGTYPE_SYSTEM);
}
else
{
if(inputVideoDevice)
{
delete inputVideoDevice ;
inputVideoDevice = NULL ;
}
videoCaptureToggleButton->setToolTip(tr("Activate camera"));
}
}
void VOIPChatWidgetHolder::addAudioData(const QString name, QByteArray* array)
{
if (!audioCaptureToggleButton->isChecked()) {
//launch an animation. Don't launch it if already animating
if (!audioCaptureToggleButton->graphicsEffect() ||
(audioCaptureToggleButton->graphicsEffect()->inherits("QGraphicsOpacityEffect") &&
((QGraphicsOpacityEffect*)audioCaptureToggleButton->graphicsEffect())->opacity() == 1)
) {
QGraphicsOpacityEffect *effect = new QGraphicsOpacityEffect(audioListenToggleButton);
audioCaptureToggleButton->setGraphicsEffect(effect);
QPropertyAnimation *anim = new QPropertyAnimation(effect, "opacity");
anim->setStartValue(1);
anim->setKeyValueAt(0.5,0);
anim->setEndValue(1);
anim->setDuration(400);
anim->start();
}
// soundManager->play(VOIP_SOUND_INCOMING_CALL);
audioCaptureToggleButton->setToolTip(tr("Answer"));
//TODO make a toaster and a sound for the incoming call
return;
}
if (!outputAudioDevice) {
outputAudioDevice = AudioDeviceHelper::getDefaultOutputDevice();
}
if (!outputAudioProcessor) {
//start output audio device
outputAudioProcessor = new QtSpeex::SpeexOutputProcessor();
if (inputAudioProcessor) {
connect(outputAudioProcessor, SIGNAL(playingFrame(QByteArray*)), inputAudioProcessor, SLOT(addEchoFrame(QByteArray*)));
}
outputAudioProcessor->open(QIODevice::ReadOnly | QIODevice::Unbuffered);
outputAudioDevice->start(outputAudioProcessor);
}
if (outputAudioDevice && outputAudioDevice->error() != QAudio::NoError) {
std::cerr << "Restarting output device. Error before reset " << outputAudioDevice->error() << " buffer size : " << outputAudioDevice->bufferSize() << std::endl;
outputAudioDevice->stop();
outputAudioDevice->reset();
if (outputAudioDevice->error() == QAudio::UnderrunError)
outputAudioDevice->setBufferSize(20);
outputAudioDevice->start(outputAudioProcessor);
}
outputAudioProcessor->putNetworkPacket(name, *array);
//check the input device for errors
if (inputAudioDevice && inputAudioDevice->error() != QAudio::NoError) {
std::cerr << "Restarting input device. Error before reset " << inputAudioDevice->error() << std::endl;
inputAudioDevice->stop();
inputAudioDevice->reset();
inputAudioDevice->start(inputAudioProcessor);
}
}
void VOIPChatWidgetHolder::sendAudioData()
{
while(inputAudioProcessor && inputAudioProcessor->hasPendingPackets()) {
QByteArray qbarray = inputAudioProcessor->getNetworkPacket();
RsVoipDataChunk chunk;
chunk.size = qbarray.size();
chunk.data = (void*)qbarray.constData();
rsVoip->sendVoipData(mChatWidget->getPeerId(),chunk);
}
}
void VOIPChatWidgetHolder::updateStatus(int status)
{
audioListenToggleButton->setEnabled(true);
audioCaptureToggleButton->setEnabled(true);
hangupButton->setEnabled(true);
switch (status) {
case RS_STATUS_OFFLINE:
audioListenToggleButton->setEnabled(false);
audioCaptureToggleButton->setEnabled(false);
hangupButton->setEnabled(false);
break;
}
}

View File

@ -0,0 +1,62 @@
#include <QObject>
#include <QGraphicsEffect>
#include <gui/SpeexProcessor.h>
#include <gui/chat/ChatWidget.h>
class QToolButton;
class QAudioInput;
class QAudioOutput;
class QVideoInputDevice ;
class QVideoOutputDevice ;
class VideoEncoder ;
class VideoDecoder ;
#define VOIP_SOUND_INCOMING_CALL "VOIP_incoming_call"
class VOIPChatWidgetHolder : public QObject, public ChatWidgetHolder
{
Q_OBJECT
public:
VOIPChatWidgetHolder(ChatWidget *chatWidget);
virtual ~VOIPChatWidgetHolder();
virtual void updateStatus(int status);
void addAudioData(const QString name, QByteArray* array) ;
void addVideoData(const QString name, QByteArray* array) ;
private slots:
void toggleAudioListen();
void toggleAudioCapture();
void toggleVideoCapture();
void hangupCall() ;
public slots:
void sendAudioData();
protected:
// Audio input/output
QAudioInput* inputAudioDevice;
QAudioOutput* outputAudioDevice;
QtSpeex::SpeexInputProcessor* inputAudioProcessor;
QtSpeex::SpeexOutputProcessor* outputAudioProcessor;
// Video input/output
QVideoOutputDevice *outputVideoDevice;
QVideoOutputDevice *echoVideoDevice;
QVideoInputDevice *inputVideoDevice;
QWidget *videoWidget ; // pointer to call show/hide
VideoEncoder *inputVideoProcessor;
VideoDecoder *outputVideoProcessor;
// Additional buttons to the chat bar
QToolButton *audioListenToggleButton ;
QToolButton *audioCaptureToggleButton ;
QToolButton *videoCaptureToggleButton ;
QToolButton *hangupButton ;
};

View File

@ -8,6 +8,9 @@
<file>images/call-start-22.png</file>
<file>images/call-stop-22.png</file>
<file>images/call-hold-22.png</file>
<file>images/camera-on.png</file>
<file>images/camera-off.png</file>
<file>images/video-icon-big.png</file>
</qresource>
</RCC>

View File

@ -0,0 +1,60 @@
#include <iostream>
#include <QByteArray>
#include <QBuffer>
#include <QImage>
#include "VideoProcessor.h"
#include "QVideoDevice.h"
//bool VideoDecoder::getNextImage(QImage& image)
//{
// if(_image_queue.empty())
// return false ;
//
// image = _image_queue.front() ;
// _image_queue.pop_front() ;
//
// return true ;
//}
bool VideoEncoder::addImage(const QImage& img)
{
std::cerr << "VideoEncoder: adding image." << std::endl;
encodeData(img) ;
if(_echo_output_device != NULL)
_echo_output_device->showFrame(img) ;
return true ;
}
void VideoDecoder::receiveEncodedData(const unsigned char *data,uint32_t size)
{
_output_device->showFrame(decodeData(data,size)) ;
}
QImage JPEGVideoDecoder::decodeData(const unsigned char *encoded_image_data,uint32_t size)
{
QByteArray qb((char*)encoded_image_data,size) ;
QImage image ;
if(image.loadFromData(qb))
return image ;
else
return QImage() ;
}
void JPEGVideoEncoder::encodeData(const QImage& image)
{
QByteArray qb ;
QBuffer buffer(&qb) ;
buffer.open(QIODevice::WriteOnly) ;
image.save(&buffer,"JPEG") ;
//destination_decoder->receiveEncodedData((unsigned char *)qb.data(),qb.size()) ;
std::cerr <<"sending encoded data. size = " << qb.size() << std::endl;
}
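
A standalone round trip of the JPEG (de)serialisation the two classes above rely on, using plain Qt; the frame size and colour are arbitrary test values, and a QCoreApplication is assumed so the JPEG image plugin can be located.

#include <QCoreApplication>
#include <QBuffer>
#include <QImage>
#include <iostream>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv) ;          // lets Qt find the JPEG image plugin

    QImage frame(128, 128, QImage::Format_RGB888) ;
    frame.fill(Qt::darkGreen) ;

    // Encode: what JPEGVideoEncoder::encodeData() does.
    QByteArray encoded ;
    QBuffer buffer(&encoded) ;
    buffer.open(QIODevice::WriteOnly) ;
    frame.save(&buffer, "JPEG") ;

    // Decode: what JPEGVideoDecoder::decodeData() does.
    QImage decoded ;
    bool ok = decoded.loadFromData(encoded) ;

    std::cerr << "encoded " << encoded.size() << " bytes, decode "
              << (ok ? "ok" : "failed") << std::endl ;
    return ok ? 0 : 1 ;
}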

View File

@ -0,0 +1,79 @@
#pragma once
#include <stdint.h>
#include <QImage>
class QVideoOutputDevice ;
// This class decodes video from a stream. It keeps a queue of
// decoded frames that need to be retrieved using the getNextImage() method.
//
class VideoDecoder
{
public:
VideoDecoder() { _output_device = NULL ;}
// Sets the output device on which decoded images will be displayed.
//
void setDisplayTarget(QVideoOutputDevice *odev) { _output_device = odev ; }
virtual void receiveEncodedData(const unsigned char *data,uint32_t size) ;
private:
QVideoOutputDevice *_output_device ;
std::list<QImage> _image_queue ;
// Incoming data is processed by a video codec and converted into images.
//
virtual QImage decodeData(const unsigned char *encoded_image,uint32_t encoded_image_size) = 0 ;
// This buffer accumulates incoming encoded data until a full packet is obtained,
// since the stream might not send images at once. When incoming images are decoded, the
// data is removed from the buffer.
//
unsigned char *buffer ;
uint32_t buffer_size ;
};
// This class encodes video using a video codec (possibly homemade, or based on existing codecs)
// and produces a data stream that is sent to the network transfer service (e.g. p3VoRs).
//
class VideoEncoder
{
public:
VideoEncoder() { _echo_output_device = NULL ;}
// Takes the next image to be encoded.
//
virtual bool addImage(const QImage& Image) ;
protected:
//virtual bool sendEncodedData(unsigned char *mem,uint32_t size) = 0 ;
virtual void encodeData(const QImage& image) = 0 ;
unsigned char *buffer ;
uint32_t buffer_size ;
QVideoOutputDevice *_echo_output_device ;
};
// Now derive various image encoding/decoding algorithms.
//
class JPEGVideoDecoder: public VideoDecoder
{
protected:
virtual QImage decodeData(const unsigned char *encoded_image,uint32_t encoded_image_size) ;
};
class JPEGVideoEncoder: public VideoEncoder
{
public:
JPEGVideoEncoder() {}
protected:
virtual void encodeData(const QImage& Image) ;
};
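
On the receiving side, a concrete decoder only needs a display target; how the encoded chunks will reach it is still open (see the commit message), so the dispatch function below is purely hypothetical and only mirrors the decoder/display pairing that VOIPChatWidgetHolder sets up.

#include "VideoProcessor.h"
#include "QVideoDevice.h"
#include "interface/rsvoip.h"

// One-time setup per chat window: decoded frames go straight to this widget.
VideoDecoder *makeVideoReceiver(QVideoOutputDevice *display)
{
    VideoDecoder *decoder = new JPEGVideoDecoder ;
    decoder->setDisplayTarget(display) ;
    return decoder ;
}

// Hypothetical dispatch of an incoming video chunk (not implemented yet):
// decodeData() rebuilds the QImage and the decoder paints it on the target.
void handleIncomingVideoChunk(VideoDecoder *decoder, const RsVoipDataChunk& chunk)
{
    decoder->receiveEncodedData(static_cast<const unsigned char*>(chunk.data), chunk.size) ;
}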

Binary file not shown (new image, 3.4 KiB).

Binary file not shown (new image, 4.0 KiB).

Binary file not shown (new image, 45 KiB).