QAudioInput with QAudioEncoderSettings to write a WAV header to audio?



the_naive
3rd December 2015, 22:12
I am trying to develop a server that will stream real-time audio to a client. Right now I am able to stream raw "audio/pcm" data from the computer's default input device and to paint its amplitude on the server widget. The problem is that when I receive the raw audio data on the client, I cannot produce intelligible output from it.


void AudioStreamer::on_listen_clicked()
{
    // This method formats the audio and then defines the device
    m_format.setSampleRate(8000); // QAudioFormat m_format;
    m_format.setChannelCount(1);
    m_format.setSampleSize(16);
    m_format.setSampleType(QAudioFormat::SignedInt);
    m_format.setByteOrder(QAudioFormat::LittleEndian);
    m_format.setCodec("audio/pcm");

    QAudioDeviceInfo devinfo(QAudioDeviceInfo::defaultInputDevice());
    if (!devinfo.isFormatSupported(m_format)) {
        qWarning() << "Default format not supported - trying to use nearest";
        m_format = devinfo.nearestFormat(m_format);
    }

    m_audioInfo = new AudioInfo(m_format, this);

    m_audioInput = new QAudioInput(devinfo, m_format, this);
    connect(m_audioInfo, SIGNAL(update(QByteArray)), SLOT(refreshDisplay(QByteArray)));

    quint16 port = ui->portNumber->text().toInt();
    server = new Server(port, this);

    m_audioInfo->start();
    m_audioInput->start(m_audioInfo);

    ui->portNumber->setEnabled(false);
    ui->listen->setEnabled(false);
}


void AudioStreamer::refreshDisplay(QByteArray data)
{
    // This method streams the audio through the server
    server->writeData(data);
    m_canvas->setLevel(m_audioInfo->level());
}

Please ignore m_audioInfo and AudioInfo as they are only used for painting the amplitude of the audio on the input device.
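As far as I understand, what goes over the socket here is just the raw "audio/pcm" payload: interleaved signed 16-bit little-endian mono samples at 8000 Hz, with no header or format metadata, so the receiver has to know the format out of band. This is just a sketch of how I picture each chunk written by refreshDisplay():

// Sketch only: each chunk is raw PCM, i.e. data.size() / 2 signed
// 16-bit little-endian mono samples at 8000 Hz, nothing else.
const qint16* samples = reinterpret_cast<const qint16*>(data.constData());
const int sampleCount = data.size() / int(sizeof(qint16));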

When trying to play the received audio on the client, I am also trying to print the different properties of the audio format:



void Client::readyRead()
{
    QByteArray data;

    while (socket->bytesAvailable() > 0)
        data.append(socket->readAll());

    qDebug() << "The size of the WAV file is: " << data.size();
    QBuffer* audio_buffer = new QBuffer(&data);
    audio_buffer->open(QIODevice::ReadOnly);
    qDebug() << audio_buffer->size();

    QAudioFormat format;

    format.setSampleSize(16);
    format.setSampleRate(8000);
    format.setChannelCount(1);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    QAudioDeviceInfo info(QAudioDeviceInfo::defaultOutputDevice());
    if (!info.isFormatSupported(format)) {
        qWarning() << "raw audio format not supported by backend, cannot play audio.";
        return;
    }
    qDebug() << info.deviceName();

    QAudioOutput* testoutput = new QAudioOutput(info, format);
    testoutput->start(audio_buffer);

    // ...then wait for the sound to finish
    QEventLoop loop;
    QObject::connect(testoutput, SIGNAL(stateChanged(QAudio::State)), &loop, SLOT(quit()));
    do {
        loop.exec();
    } while (testoutput->state() == QAudio::ActiveState);

    // Define the header components
    char fileType[4];
    qint32 fileSize;
    char waveName[4];
    char fmtName[4];            // "fmt " chunk id is four bytes (note the trailing space)
    qint32 fmtLength;
    short fmtType;
    short numberOfChannels;
    qint32 sampleRate;
    qint32 sampleRateXBitsPerSampleXChanngelsDivEight;
    short bitsPerSampleXChannelsDivEightPointOne;
    short bitsPerSample;
    char dataHeader[4];
    qint32 dataSize;

    // Create a data stream to analyze the data

    QDataStream analyzeHeaderDS(&data, QIODevice::ReadOnly);
    analyzeHeaderDS.setByteOrder(QDataStream::LittleEndian);

    // Now pop off the appropriate data into each header field defined above

    analyzeHeaderDS.readRawData(fileType, 4);   // "RIFF"
    analyzeHeaderDS >> fileSize;                // File size
    analyzeHeaderDS.readRawData(waveName, 4);   // "WAVE"
    analyzeHeaderDS.readRawData(fmtName, 4);    // "fmt "
    analyzeHeaderDS >> fmtLength;               // Format length
    analyzeHeaderDS >> fmtType;                 // Format type
    analyzeHeaderDS >> numberOfChannels;        // Number of channels
    analyzeHeaderDS >> sampleRate;              // Sample rate
    analyzeHeaderDS >> sampleRateXBitsPerSampleXChanngelsDivEight; // (Sample Rate * BitsPerSample * Channels) / 8 (byte rate)
    analyzeHeaderDS >> bitsPerSampleXChannelsDivEightPointOne;     // (BitsPerSample * Channels) / 8 (block align)
    analyzeHeaderDS >> bitsPerSample;           // Bits per sample
    analyzeHeaderDS.readRawData(dataHeader, 4); // "data" header
    analyzeHeaderDS >> dataSize;                // Data size

    // Print the header

    qDebug() << "WAV File Header read:";
    qDebug() << "File Type: " << QString::fromUtf8(fileType, 4);
    qDebug() << "File Size: " << fileSize;
    qDebug() << "WAV Marker: " << QString::fromUtf8(waveName, 4);
    qDebug() << "Format Name: " << QString::fromUtf8(fmtName, 4);
    qDebug() << "Format Length: " << fmtLength;
    qDebug() << "Format Type: " << fmtType;
    qDebug() << "Number of Channels: " << numberOfChannels;
    qDebug() << "Byte Rate (Sample Rate * Bits/Sample * Channels / 8): " << sampleRateXBitsPerSampleXChanngelsDivEight;
    qDebug() << "Sample Rate: " << sampleRate;
    qDebug() << "Block Align (Bits/Sample * Channels / 8): " << bitsPerSampleXChannelsDivEightPointOne;
    qDebug() << "Bits per Sample: " << bitsPerSample;
    qDebug() << "Data Header: " << QString::fromUtf8(dataHeader, 4);
    qDebug() << "Data Size: " << dataSize;

    delete audio_buffer;
    delete testoutput;
    output.writeData(data);
}


But all of the printed header fields are either empty or garbage, and the audio output is unintelligible.

So I am wondering: do I have to write some header into the streamed audio? I know that Qt has the QAudioEncoderSettings class, but how do I use it with QAudioInput to make the streamed audio properly formatted and intelligible?
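From what I have read, QAudioEncoderSettings seems to be meant for QAudioRecorder / QMediaRecorder rather than QAudioInput, so perhaps the alternative is to build the 44-byte RIFF/WAVE header myself and prepend it to the PCM data. Something like this sketch is what I have in mind (wavHeader is just a hypothetical helper name, using the same 8000 Hz / mono / 16-bit values as above):

QByteArray wavHeader(qint32 pcmBytes, qint32 sampleRate, qint16 channels, qint16 bitsPerSample)
{
    QByteArray header;
    QDataStream out(&header, QIODevice::WriteOnly);
    out.setByteOrder(QDataStream::LittleEndian);

    const qint16 blockAlign = channels * bitsPerSample / 8;   // bytes per sample frame
    const qint32 byteRate   = sampleRate * blockAlign;        // bytes per second

    out.writeRawData("RIFF", 4);
    out << qint32(36 + pcmBytes);    // RIFF chunk size = 36 + PCM data bytes
    out.writeRawData("WAVE", 4);
    out.writeRawData("fmt ", 4);     // "fmt " is 4 bytes, including the space
    out << qint32(16);               // fmt chunk length for plain PCM
    out << qint16(1);                // format type 1 = PCM
    out << channels;
    out << sampleRate;
    out << byteRate;
    out << blockAlign;
    out << bitsPerSample;
    out.writeRawData("data", 4);
    out << pcmBytes;                 // size of the PCM payload that follows
    return header;
}

If that is the right idea, I suppose the client could prepend wavHeader(data.size(), 8000, 1, 16) to the received bytes before parsing or saving them, or the server could send the header once before the PCM chunks (with the total size patched in later, since it is not known up front while streaming).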

Thank you.