|
1 /**************************************************************************** |
|
2 ** |
|
3 ** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). |
|
4 ** All rights reserved. |
|
5 ** Contact: Nokia Corporation (qt-info@nokia.com) |
|
6 ** |
|
7 ** This file is part of the examples of the Qt Toolkit. |
|
8 ** |
|
9 ** $QT_BEGIN_LICENSE:LGPL$ |
|
10 ** No Commercial Usage |
|
11 ** This file contains pre-release code and may not be distributed. |
|
12 ** You may use this file in accordance with the terms and conditions |
|
13 ** contained in the Technology Preview License Agreement accompanying |
|
14 ** this package. |
|
15 ** |
|
16 ** GNU Lesser General Public License Usage |
|
17 ** Alternatively, this file may be used under the terms of the GNU Lesser |
|
18 ** General Public License version 2.1 as published by the Free Software |
|
19 ** Foundation and appearing in the file LICENSE.LGPL included in the |
|
20 ** packaging of this file. Please review the following information to |
|
21 ** ensure the GNU Lesser General Public License version 2.1 requirements |
|
22 ** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. |
|
23 ** |
|
24 ** In addition, as a special exception, Nokia gives you certain additional |
|
25 ** rights. These rights are described in the Nokia Qt LGPL Exception |
|
26 ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. |
|
27 ** |
|
28 ** If you have questions regarding the use of this file, please contact |
|
29 ** Nokia at qt-info@nokia.com. |
|
30 ** |
|
31 ** |
|
32 ** |
|
33 ** |
|
34 ** |
|
35 ** |
|
36 ** |
|
37 ** |
|
38 ** $QT_END_LICENSE$ |
|
39 ** |
|
40 ****************************************************************************/ |
|
41 |
|
42 #include "engine.h" |
|
43 #include "tonegenerator.h" |
|
44 #include "utils.h" |
|
45 |
|
46 #include <math.h> |
|
47 |
|
48 #include <QCoreApplication> |
|
49 #include <QMetaObject> |
|
50 #include <QSet> |
|
51 #include <QtMultimedia/QAudioInput> |
|
52 #include <QtMultimedia/QAudioOutput> |
|
53 #include <QDebug> |
|
54 #include <QThread> |
|
55 #include <QFile> |
|
56 |
|
57 //----------------------------------------------------------------------------- |
|
58 // Constants |
|
59 //----------------------------------------------------------------------------- |
|
60 |
|
// Length of the internal audio buffer, in microseconds (10 seconds).
const qint64 BufferDurationUs = 10 * 1000000;

// Interval at which the active QAudioInput/QAudioOutput emits notify(),
// driving Engine::audioNotify(), in milliseconds.
const int NotifyIntervalMs = 100;

// Size of the level calculation window in microseconds
const int LevelWindowUs = 0.1 * 1000000;
|
66 |
|
67 |
|
68 //----------------------------------------------------------------------------- |
|
69 // Helper functions |
|
70 //----------------------------------------------------------------------------- |
|
71 |
|
72 QDebug& operator<<(QDebug &debug, const QAudioFormat &format) |
|
73 { |
|
74 debug << format.frequency() << "Hz" |
|
75 << format.channels() << "channels"; |
|
76 return debug; |
|
77 } |
|
78 |
|
79 //----------------------------------------------------------------------------- |
|
80 // Constructor and destructor |
|
81 //----------------------------------------------------------------------------- |
|
82 |
|
/**
 * Construct the engine in its idle state: input mode, stopped, no tone,
 * no file, default input/output devices, empty buffer and zero levels.
 * Connects the spectrum analyser and performs an initial initialize().
 */
Engine::Engine(QObject *parent)
    :   QObject(parent)
    ,   m_mode(QAudio::AudioInput)
    ,   m_state(QAudio::StoppedState)
    ,   m_generateTone(false)
    ,   m_file(0)
    ,   m_availableAudioInputDevices
            (QAudioDeviceInfo::availableDevices(QAudio::AudioInput))
    ,   m_audioInputDevice(QAudioDeviceInfo::defaultInputDevice())
    ,   m_audioInput(0)
    ,   m_audioInputIODevice(0)
    ,   m_recordPosition(0)
    ,   m_availableAudioOutputDevices
            (QAudioDeviceInfo::availableDevices(QAudio::AudioOutput))
    ,   m_audioOutputDevice(QAudioDeviceInfo::defaultOutputDevice())
    ,   m_audioOutput(0)
    ,   m_playPosition(0)
    ,   m_dataLength(0)
    ,   m_rmsLevel(0.0)
    ,   m_peakLevel(0.0)
    ,   m_spectrumLengthBytes(0)
    ,   m_spectrumAnalyser()
    ,   m_spectrumPosition(0)
    ,   m_count(0)
{
    // Register FrequencySpectrum so it can be carried by the
    // spectrumChanged signal/slot connection below.
    qRegisterMetaType<FrequencySpectrum>("FrequencySpectrum");
    CHECKED_CONNECT(&m_spectrumAnalyser,
                    SIGNAL(spectrumChanged(FrequencySpectrum)),
                    this,
                    SLOT(spectrumChanged(FrequencySpectrum)));

    initialize();

#ifdef DUMP_DATA
    createOutputDir();
#endif

#ifdef DUMP_SPECTRUM
    m_spectrumAnalyser.setOutputPath(outputPath());
#endif
}
|
124 |
|
/**
 * Destructor.  Intentionally empty: the QAudioInput/QAudioOutput objects
 * and the QFile are created with this object as parent, so Qt's
 * parent-child ownership destroys them.
 */
Engine::~Engine()
{

}
|
129 |
|
130 //----------------------------------------------------------------------------- |
|
131 // Public functions |
|
132 //----------------------------------------------------------------------------- |
|
133 |
|
134 bool Engine::loadFile(const QString &fileName) |
|
135 { |
|
136 bool result = false; |
|
137 m_generateTone = false; |
|
138 |
|
139 Q_ASSERT(!fileName.isEmpty()); |
|
140 Q_ASSERT(!m_file); |
|
141 m_file = new QFile(fileName, this); |
|
142 m_file->setFileName(fileName); |
|
143 Q_ASSERT(m_file->exists()); |
|
144 if (m_file->open(QFile::ReadOnly)) { |
|
145 m_wavFile.readHeader(*m_file); |
|
146 if (isPCMS16LE(m_wavFile.format())) { |
|
147 result = initialize(); |
|
148 } else { |
|
149 emit errorMessage(tr("Audio format not supported"), |
|
150 formatToString(m_wavFile.format())); |
|
151 } |
|
152 } else { |
|
153 emit errorMessage(tr("Could not open file"), fileName); |
|
154 } |
|
155 |
|
156 delete m_file; |
|
157 m_file = 0; |
|
158 |
|
159 return result; |
|
160 } |
|
161 |
|
162 bool Engine::generateTone(const Tone &tone) |
|
163 { |
|
164 Q_ASSERT(!m_file); |
|
165 m_generateTone = true; |
|
166 m_tone = tone; |
|
167 ENGINE_DEBUG << "Engine::generateTone" |
|
168 << "startFreq" << m_tone.startFreq |
|
169 << "endFreq" << m_tone.endFreq |
|
170 << "amp" << m_tone.amplitude; |
|
171 return initialize(); |
|
172 } |
|
173 |
|
174 bool Engine::generateSweptTone(qreal amplitude) |
|
175 { |
|
176 Q_ASSERT(!m_file); |
|
177 m_generateTone = true; |
|
178 m_tone.startFreq = 1; |
|
179 m_tone.endFreq = 0; |
|
180 m_tone.amplitude = amplitude; |
|
181 ENGINE_DEBUG << "Engine::generateSweptTone" |
|
182 << "startFreq" << m_tone.startFreq |
|
183 << "amp" << m_tone.amplitude; |
|
184 return initialize(); |
|
185 } |
|
186 |
|
187 bool Engine::initializeRecord() |
|
188 { |
|
189 ENGINE_DEBUG << "Engine::initializeRecord"; |
|
190 Q_ASSERT(!m_file); |
|
191 m_generateTone = false; |
|
192 m_tone = SweptTone(); |
|
193 return initialize(); |
|
194 } |
|
195 |
|
/**
 * Duration of the internal audio buffer, in microseconds.
 * Fixed at BufferDurationUs regardless of how much data is present.
 */
qint64 Engine::bufferDuration() const
{
    return BufferDurationUs;
}
|
200 |
|
201 qint64 Engine::dataDuration() const |
|
202 { |
|
203 qint64 result = 0; |
|
204 if (QAudioFormat() != m_format) |
|
205 result = audioDuration(m_format, m_dataLength); |
|
206 return result; |
|
207 } |
|
208 |
|
209 qint64 Engine::audioBufferLength() const |
|
210 { |
|
211 qint64 length = 0; |
|
212 if (QAudio::ActiveState == m_state || QAudio::IdleState == m_state) { |
|
213 Q_ASSERT(QAudioFormat() != m_format); |
|
214 switch (m_mode) { |
|
215 case QAudio::AudioInput: |
|
216 length = m_audioInput->bufferSize(); |
|
217 break; |
|
218 case QAudio::AudioOutput: |
|
219 length = m_audioOutput->bufferSize(); |
|
220 break; |
|
221 } |
|
222 } |
|
223 return length; |
|
224 } |
|
225 |
|
/**
 * Select the window function the spectrum analyser applies before its
 * FFT.  Simply forwards to the analyser.
 */
void Engine::setWindowFunction(WindowFunction type)
{
    m_spectrumAnalyser.setWindowFunction(type);
}
|
230 |
|
231 |
|
232 //----------------------------------------------------------------------------- |
|
233 // Public slots |
|
234 //----------------------------------------------------------------------------- |
|
235 |
|
/**
 * Begin (or resume) audio capture.
 *
 * If capture is merely suspended, it is resumed.  Otherwise a fresh
 * recording is started: pending spectrum work is cancelled, the buffer
 * and positions are zeroed, playback is stopped, the input device's
 * signals are (re)connected, and the pull-mode stream is started.
 * Does nothing if no QAudioInput exists.
 */
void Engine::startRecording()
{
    if (m_audioInput) {
        if (QAudio::AudioInput == m_mode &&
            QAudio::SuspendedState == m_state) {
            m_audioInput->resume();
        } else {
            m_spectrumAnalyser.cancelCalculation();
            // Emit an empty spectrum so listeners clear their display
            spectrumChanged(0, 0, FrequencySpectrum());

            m_buffer.fill(0);
            setRecordPosition(0, true);
            stopPlayback();
            m_mode = QAudio::AudioInput;
            // Connections must be (re)made each time: stopRecording()
            // calls m_audioInput->disconnect().
            CHECKED_CONNECT(m_audioInput, SIGNAL(stateChanged(QAudio::State)),
                            this, SLOT(audioStateChanged(QAudio::State)));
            CHECKED_CONNECT(m_audioInput, SIGNAL(notify()),
                            this, SLOT(audioNotify()));
            m_count = 0;
            m_dataLength = 0;
            emit dataDurationChanged(0);
            // Pull mode: captured bytes are read from this QIODevice in
            // audioDataReady() whenever it emits readyRead().
            m_audioInputIODevice = m_audioInput->start();
            CHECKED_CONNECT(m_audioInputIODevice, SIGNAL(readyRead()),
                            this, SLOT(audioDataReady()));
        }
    }
}
|
263 |
|
/**
 * Begin (or resume) playback of the buffered audio data.
 *
 * If playback is merely suspended, it is resumed (with a Windows
 * workaround, see below).  Otherwise playback starts from the top:
 * pending spectrum work is cancelled, the play position is reset,
 * recording is stopped, the output device's signals are (re)connected,
 * and the buffer is streamed via m_audioOutputIODevice.
 * Does nothing if no QAudioOutput exists.
 */
void Engine::startPlayback()
{
    if (m_audioOutput) {
        if (QAudio::AudioOutput == m_mode &&
            QAudio::SuspendedState == m_state) {
#ifdef Q_OS_WIN
            // The Windows backend seems to internally go back into ActiveState
            // while still returning SuspendedState, so to ensure that it doesn't
            // ignore the resume() call, we first re-suspend
            m_audioOutput->suspend();
#endif
            m_audioOutput->resume();
        } else {
            m_spectrumAnalyser.cancelCalculation();
            // Emit an empty spectrum so listeners clear their display
            spectrumChanged(0, 0, FrequencySpectrum());

            setPlayPosition(0, true);
            stopRecording();
            m_mode = QAudio::AudioOutput;
            // Connections must be (re)made each time: stopPlayback()
            // calls m_audioOutput->disconnect().
            CHECKED_CONNECT(m_audioOutput, SIGNAL(stateChanged(QAudio::State)),
                            this, SLOT(audioStateChanged(QAudio::State)));
            CHECKED_CONNECT(m_audioOutput, SIGNAL(notify()),
                            this, SLOT(audioNotify()));
            m_count = 0;
            // Rewind the QBuffer wrapper around m_buffer and hand it to
            // the output device.
            m_audioOutputIODevice.close();
            m_audioOutputIODevice.setBuffer(&m_buffer);
            m_audioOutputIODevice.open(QIODevice::ReadOnly);
            m_audioOutput->start(&m_audioOutputIODevice);
        }
    }
}
|
295 |
|
296 void Engine::suspend() |
|
297 { |
|
298 if (QAudio::ActiveState == m_state || |
|
299 QAudio::IdleState == m_state) { |
|
300 switch (m_mode) { |
|
301 case QAudio::AudioInput: |
|
302 m_audioInput->suspend(); |
|
303 break; |
|
304 case QAudio::AudioOutput: |
|
305 m_audioOutput->suspend(); |
|
306 break; |
|
307 } |
|
308 } |
|
309 } |
|
310 |
|
311 void Engine::setAudioInputDevice(const QAudioDeviceInfo &device) |
|
312 { |
|
313 if (device.deviceName() != m_audioInputDevice.deviceName()) { |
|
314 m_audioInputDevice = device; |
|
315 initialize(); |
|
316 } |
|
317 } |
|
318 |
|
319 void Engine::setAudioOutputDevice(const QAudioDeviceInfo &device) |
|
320 { |
|
321 if (device.deviceName() != m_audioOutputDevice.deviceName()) { |
|
322 m_audioOutputDevice = device; |
|
323 initialize(); |
|
324 } |
|
325 } |
|
326 |
|
327 |
|
328 //----------------------------------------------------------------------------- |
|
329 // Private slots |
|
330 //----------------------------------------------------------------------------- |
|
331 |
|
/**
 * Periodic housekeeping, driven by the notify() signal of the active
 * audio device (every NotifyIntervalMs).
 *
 * Input mode:  update the record position, then compute the level and
 *              (once enough data exists) the spectrum over the most
 *              recently captured window.
 * Output mode: update the play position, compute level/spectrum at the
 *              current playback position, and stop playback once the
 *              end of the data is reached.
 */
void Engine::audioNotify()
{
    switch (m_mode) {
    case QAudio::AudioInput: {
            // processedUSecs() can exceed the buffer length; clamp it.
            const qint64 recordPosition =
                    qMin(BufferDurationUs, m_audioInput->processedUSecs());
            setRecordPosition(recordPosition);

            // Calculate level of most recently captured data
            qint64 levelLength = audioLength(m_format, LevelWindowUs);
            levelLength = qMin(m_dataLength, levelLength);
            const qint64 levelPosition = m_dataLength - levelLength;
            calculateLevel(levelPosition, levelLength);

            // Calculate spectrum of most recently captured data
            if (m_dataLength >= m_spectrumLengthBytes) {
                const qint64 spectrumPosition = m_dataLength - m_spectrumLengthBytes;
                calculateSpectrum(spectrumPosition);
            }
        }
        break;
    case QAudio::AudioOutput: {
            const qint64 playPosition =
                    qMin(dataDuration(), m_audioOutput->processedUSecs());
            setPlayPosition(playPosition);

            // Convert the play position (microseconds) to a byte offset
            qint64 analysisPosition = audioLength(m_format, playPosition);

            // Calculate level of data starting at current playback position
            const qint64 levelLength = audioLength(m_format, LevelWindowUs);
            if (analysisPosition + levelLength < m_dataLength)
                calculateLevel(analysisPosition, levelLength);

            if (analysisPosition + m_spectrumLengthBytes < m_dataLength)
                calculateSpectrum(analysisPosition);

            if (dataDuration() == playPosition)
                stopPlayback();
        }
        break;
    }
}
|
374 |
|
375 void Engine::audioStateChanged(QAudio::State state) |
|
376 { |
|
377 ENGINE_DEBUG << "Engine::audioStateChanged from" << m_state |
|
378 << "to" << state; |
|
379 |
|
380 if (QAudio::StoppedState == state) { |
|
381 // Check error |
|
382 QAudio::Error error = QAudio::NoError; |
|
383 switch (m_mode) { |
|
384 case QAudio::AudioInput: |
|
385 error = m_audioInput->error(); |
|
386 break; |
|
387 case QAudio::AudioOutput: |
|
388 error = m_audioOutput->error(); |
|
389 break; |
|
390 } |
|
391 if (QAudio::NoError != error) { |
|
392 reset(); |
|
393 return; |
|
394 } |
|
395 } |
|
396 setState(state); |
|
397 } |
|
398 |
|
399 void Engine::audioDataReady() |
|
400 { |
|
401 const qint64 bytesReady = m_audioInput->bytesReady(); |
|
402 const qint64 bytesSpace = m_buffer.size() - m_dataLength; |
|
403 const qint64 bytesToRead = qMin(bytesReady, bytesSpace); |
|
404 |
|
405 const qint64 bytesRead = m_audioInputIODevice->read( |
|
406 m_buffer.data() + m_dataLength, |
|
407 bytesToRead); |
|
408 |
|
409 if (bytesRead) { |
|
410 m_dataLength += bytesRead; |
|
411 |
|
412 const qint64 duration = audioDuration(m_format, m_dataLength); |
|
413 emit dataDurationChanged(duration); |
|
414 } |
|
415 |
|
416 if (m_buffer.size() == m_dataLength) |
|
417 stopRecording(); |
|
418 } |
|
419 |
|
420 void Engine::spectrumChanged(const FrequencySpectrum &spectrum) |
|
421 { |
|
422 ENGINE_DEBUG << "Engine::spectrumChanged" << "pos" << m_spectrumPosition; |
|
423 const qint64 positionUs = audioDuration(m_format, m_spectrumPosition); |
|
424 const qint64 lengthUs = audioDuration(m_format, m_spectrumLengthBytes); |
|
425 emit spectrumChanged(positionUs, lengthUs, spectrum); |
|
426 } |
|
427 |
|
428 |
|
429 //----------------------------------------------------------------------------- |
|
430 // Private functions |
|
431 //----------------------------------------------------------------------------- |
|
432 |
|
/**
 * Stop any recording/playback and return the engine to its initial
 * state: default format, no audio devices, empty buffer, zero positions
 * and levels.  Called at the start of initialize() and when a device
 * error is detected in audioStateChanged().
 */
void Engine::reset()
{
    stopRecording();
    stopPlayback();
    setState(QAudio::AudioInput, QAudio::StoppedState);
    setFormat(QAudioFormat());
    delete m_audioInput;
    m_audioInput = 0;
    // The IODevice is owned by QAudioInput, just drop the pointer
    m_audioInputIODevice = 0;
    setRecordPosition(0);
    delete m_audioOutput;
    m_audioOutput = 0;
    setPlayPosition(0);
    m_buffer.clear();
    m_dataLength = 0;
    m_spectrumPosition = 0;
    emit dataDurationChanged(0);
    setLevel(0.0, 0.0, 0);
}
|
452 |
|
/**
 * (Re)create the audio machinery for the current mode of operation.
 *
 * After a full reset() and format negotiation, either:
 *  - tone mode:     synthesize the tone directly into m_buffer;
 *  - file mode:     read the WAV data into m_buffer; or
 *  - record mode:   create a QAudioInput.
 * A QAudioOutput is created in every successful case so the buffer can
 * be played back, and the spectrum window size is derived from the
 * negotiated format.
 *
 * @return true on success; on failure an errorMessage() is emitted and
 *         false is returned.
 */
bool Engine::initialize()
{
    bool result = false;

    reset();

    if (selectFormat()) {
        // Size the buffer to hold BufferDurationUs worth of audio
        const qint64 bufferLength = audioLength(m_format, BufferDurationUs);
        m_buffer.resize(bufferLength);
        m_buffer.fill(0);
        emit bufferDurationChanged(BufferDurationUs);

        if (m_generateTone) {
            if (0 == m_tone.endFreq) {
                // Swept tone: sweep up to the Nyquist frequency, capped
                // at SpectrumHighFreq
                const qreal nyquist = nyquistFrequency(m_format);
                m_tone.endFreq = qMin(qreal(SpectrumHighFreq), nyquist);
            }

            // Call function defined in utils.h, at global scope
            ::generateTone(m_tone, m_format, m_buffer);
            m_dataLength = m_buffer.size();
            emit dataDurationChanged(bufferDuration());
            setRecordPosition(bufferDuration());
            result = true;
        } else if (m_file) {
            const qint64 length = m_wavFile.readData(*m_file, m_buffer, m_format);
            if (length) {
                m_dataLength = length;
                emit dataDurationChanged(dataDuration());
                setRecordPosition(dataDuration());
                result = true;
            }
        } else {
            m_audioInput = new QAudioInput(m_audioInputDevice, m_format, this);
            m_audioInput->setNotifyInterval(NotifyIntervalMs);
            result = true;
        }

        m_audioOutput = new QAudioOutput(m_audioOutputDevice, m_format, this);
        m_audioOutput->setNotifyInterval(NotifyIntervalMs);
        // Number of bytes analysed per spectrum calculation
        m_spectrumLengthBytes = SpectrumLengthSamples *
                                (m_format.sampleSize() / 8) * m_format.channels();
    } else {
        // Format negotiation failed; report a mode-specific error
        if (m_file)
            emit errorMessage(tr("Audio format not supported"),
                              formatToString(m_format));
        else if (m_generateTone)
            emit errorMessage(tr("No suitable format found"), "");
        else
            emit errorMessage(tr("No common input / output format found"), "");
    }

    ENGINE_DEBUG << "Engine::initialize" << "format" << m_format;

    return result;
}
|
509 |
|
/**
 * Choose the audio format the engine will operate in, storing it via
 * setFormat().
 *
 * File mode: use the format from the WAV header if the output device
 * supports it, otherwise retry with the mono/stereo channel count
 * flipped.
 *
 * Other modes: search for a 16-bit signed little-endian PCM format
 * whose frequency/channel combination is supported by the output
 * device and (unless generating a tone) also by the input device.
 *
 * @return true if a supported format was found.
 */
bool Engine::selectFormat()
{
    bool foundSupportedFormat = false;

    if (m_file) {
        // Header is read from the WAV file; just need to check whether
        // it is supported by the audio output device
        QAudioFormat format = m_wavFile.format();
        if (m_audioOutputDevice.isFormatSupported(m_wavFile.format())) {
            setFormat(m_wavFile.format());
            foundSupportedFormat = true;
        } else {
            // Try flipping mono <-> stereo
            const int channels = (format.channels() == 1) ? 2 : 1;
            format.setChannels(channels);
            if (m_audioOutputDevice.isFormatSupported(format)) {
                setFormat(format);
                foundSupportedFormat = true;
            }
        }
    } else {

        QList<int> frequenciesList;
#ifdef Q_OS_WIN
        // The Windows audio backend does not correctly report format support
        // (see QTBUG-9100). Furthermore, although the audio subsystem captures
        // at 11025Hz, the resulting audio is corrupted.
        frequenciesList += 8000;
#endif

        // Candidate sample rates: union of input and output support
        // (input is irrelevant when only generating a tone)
        if (!m_generateTone)
            frequenciesList += m_audioInputDevice.supportedFrequencies();

        frequenciesList += m_audioOutputDevice.supportedFrequencies();
        frequenciesList = frequenciesList.toSet().toList(); // remove duplicates
        qSort(frequenciesList);
        ENGINE_DEBUG << "Engine::initialize frequenciesList" << frequenciesList;

        // Candidate channel counts, likewise deduplicated and sorted
        QList<int> channelsList;
        channelsList += m_audioInputDevice.supportedChannels();
        channelsList += m_audioOutputDevice.supportedChannels();
        channelsList = channelsList.toSet().toList();
        qSort(channelsList);
        ENGINE_DEBUG << "Engine::initialize channelsList" << channelsList;

        // Fixed requirements: 16-bit signed little-endian PCM
        QAudioFormat format;
        format.setByteOrder(QAudioFormat::LittleEndian);
        format.setCodec("audio/pcm");
        format.setSampleSize(16);
        format.setSampleType(QAudioFormat::SignedInt);
        int frequency, channels;
        // Try each frequency/channel pair until one is accepted by both
        // devices (lowest frequency and channel count win)
        foreach (frequency, frequenciesList) {
            if (foundSupportedFormat)
                break;
            format.setFrequency(frequency);
            foreach (channels, channelsList) {
                format.setChannels(channels);
                const bool inputSupport = m_generateTone ||
                                          m_audioInputDevice.isFormatSupported(format);
                const bool outputSupport = m_audioOutputDevice.isFormatSupported(format);
                ENGINE_DEBUG << "Engine::initialize checking " << format
                             << "input" << inputSupport
                             << "output" << outputSupport;
                if (inputSupport && outputSupport) {
                    foundSupportedFormat = true;
                    break;
                }
            }
        }

        if (!foundSupportedFormat)
            format = QAudioFormat();

        setFormat(format);
    }

    return foundSupportedFormat;
}
|
588 |
|
/**
 * Stop audio capture, if active, and disconnect the input device's
 * signals.  The IODevice pointer is cleared unconditionally (it is
 * owned by QAudioInput).
 */
void Engine::stopRecording()
{
    if (m_audioInput) {
        m_audioInput->stop();
        // NOTE(review): presumably this flushes queued readyRead()/
        // stateChanged() events before disconnecting — TODO confirm
        QCoreApplication::instance()->processEvents();
        m_audioInput->disconnect();
    }
    m_audioInputIODevice = 0;

#ifdef DUMP_AUDIO
    dumpData();
#endif
}
|
602 |
|
603 void Engine::stopPlayback() |
|
604 { |
|
605 if (m_audioOutput) { |
|
606 m_audioOutput->stop(); |
|
607 QCoreApplication::instance()->processEvents(); |
|
608 m_audioOutput->disconnect(); |
|
609 setPlayPosition(0); |
|
610 } |
|
611 } |
|
612 |
|
613 void Engine::setState(QAudio::State state) |
|
614 { |
|
615 const bool changed = (m_state != state); |
|
616 m_state = state; |
|
617 if (changed) |
|
618 emit stateChanged(m_mode, m_state); |
|
619 } |
|
620 |
|
621 void Engine::setState(QAudio::Mode mode, QAudio::State state) |
|
622 { |
|
623 const bool changed = (m_mode != mode || m_state != state); |
|
624 m_mode = mode; |
|
625 m_state = state; |
|
626 if (changed) |
|
627 emit stateChanged(m_mode, m_state); |
|
628 } |
|
629 |
|
630 void Engine::setRecordPosition(qint64 position, bool forceEmit) |
|
631 { |
|
632 const bool changed = (m_recordPosition != position); |
|
633 m_recordPosition = position; |
|
634 if (changed || forceEmit) |
|
635 emit recordPositionChanged(m_recordPosition); |
|
636 } |
|
637 |
|
638 void Engine::setPlayPosition(qint64 position, bool forceEmit) |
|
639 { |
|
640 const bool changed = (m_playPosition != position); |
|
641 m_playPosition = position; |
|
642 if (changed || forceEmit) |
|
643 emit playPositionChanged(m_playPosition); |
|
644 } |
|
645 |
|
646 void Engine::calculateLevel(qint64 position, qint64 length) |
|
647 { |
|
648 #ifdef DISABLE_LEVEL |
|
649 Q_UNUSED(position) |
|
650 Q_UNUSED(length) |
|
651 #else |
|
652 Q_ASSERT(position + length <= m_dataLength); |
|
653 |
|
654 qreal peakLevel = 0.0; |
|
655 |
|
656 qreal sum = 0.0; |
|
657 const char *ptr = m_buffer.constData() + position; |
|
658 const char *const end = ptr + length; |
|
659 while (ptr < end) { |
|
660 const qint16 value = *reinterpret_cast<const qint16*>(ptr); |
|
661 const qreal fracValue = pcmToReal(value); |
|
662 peakLevel = qMax(peakLevel, fracValue); |
|
663 sum += fracValue * fracValue; |
|
664 ptr += 2; |
|
665 } |
|
666 const int numSamples = length / 2; |
|
667 qreal rmsLevel = sqrt(sum / numSamples); |
|
668 |
|
669 rmsLevel = qMax(qreal(0.0), rmsLevel); |
|
670 rmsLevel = qMin(qreal(1.0), rmsLevel); |
|
671 setLevel(rmsLevel, peakLevel, numSamples); |
|
672 |
|
673 ENGINE_DEBUG << "Engine::calculateLevel" << "pos" << position << "len" << length |
|
674 << "rms" << rmsLevel << "peak" << peakLevel; |
|
675 #endif |
|
676 } |
|
677 |
|
/**
 * Hand a window of buffered audio to the spectrum analyser, if it is
 * idle.  The result arrives asynchronously via the analyser's
 * spectrumChanged signal.
 *
 * @param position Byte offset of the window within m_buffer;
 *                 position + m_spectrumLengthBytes must not exceed
 *                 m_dataLength.
 */
void Engine::calculateSpectrum(qint64 position)
{
#ifdef DISABLE_SPECTRUM
    Q_UNUSED(position)
#else
    Q_ASSERT(position + m_spectrumLengthBytes <= m_dataLength);
    Q_ASSERT(0 == m_spectrumLengthBytes % 2); // constraint of FFT algorithm

    // QThread::currentThread is marked 'for internal use only', but
    // we're only using it for debug output here, so it's probably OK :)
    ENGINE_DEBUG << "Engine::calculateSpectrum" << QThread::currentThread()
                 << "count" << m_count << "pos" << position << "len" << m_spectrumLengthBytes
                 << "spectrumAnalyser.isReady" << m_spectrumAnalyser.isReady();

    if(m_spectrumAnalyser.isReady()) {
        // fromRawData() does not copy: m_spectrumBuffer aliases m_buffer.
        // Both are members of this object, so the data stays alive while
        // the analyser works on it.
        m_spectrumBuffer = QByteArray::fromRawData(m_buffer.constData() + position,
                                                   m_spectrumLengthBytes);
        m_spectrumPosition = position;
        m_spectrumAnalyser.calculate(m_spectrumBuffer, m_format);
    }
#endif
}
|
700 |
|
701 void Engine::setFormat(const QAudioFormat &format) |
|
702 { |
|
703 const bool changed = (format != m_format); |
|
704 m_format = format; |
|
705 if (changed) |
|
706 emit formatChanged(m_format); |
|
707 } |
|
708 |
|
/**
 * Store the current RMS and peak levels and notify listeners.
 *
 * @param numSamples Number of samples over which the levels were
 *                   calculated (zero when resetting).
 */
void Engine::setLevel(qreal rmsLevel, qreal peakLevel, int numSamples)
{
    m_rmsLevel = rmsLevel;
    m_peakLevel = peakLevel;
    emit levelChanged(m_rmsLevel, m_peakLevel, numSamples);
}
|
715 |
|
#ifdef DUMP_DATA
/**
 * Point m_outputDir at ./output, creating the directory if necessary
 * and emptying it of leftover files from a previous run.
 */
void Engine::createOutputDir()
{
    m_outputDir.setPath("output");

    if (!m_outputDir.exists()) {
        QDir::current().mkdir("output");
        return;
    }

    // Directory already exists: remove any files it contains
    foreach (const QString &file, m_outputDir.entryList(QDir::Files))
        m_outputDir.remove(file);
}
#endif // DUMP_DATA
|
732 |
|
#ifdef DUMP_AUDIO
/**
 * Write the buffered audio to the output directory in two forms:
 * data.txt, a tab-separated "index<TAB>value" dump of the first channel
 * of each frame, and data.pcm, the raw buffer bytes.
 */
void Engine::dumpData()
{
    const QString txtFileName = m_outputDir.filePath("data.txt");
    QFile txtFile(txtFileName);
    txtFile.open(QFile::WriteOnly | QFile::Text);
    QTextStream stream(&txtFile);
    const qint16 *ptr = reinterpret_cast<const qint16*>(m_buffer.constData());
    // Two bytes per sample, channels() samples per frame
    const int numSamples = m_dataLength / (2 * m_format.channels());
    for (int i=0; i<numSamples; ++i) {
        stream << i << "\t" << *ptr << "\n";
        // Skip to the same channel in the next frame
        ptr += m_format.channels();
    }

    const QString pcmFileName = m_outputDir.filePath("data.pcm");
    QFile pcmFile(pcmFileName);
    pcmFile.open(QFile::WriteOnly);
    pcmFile.write(m_buffer.constData(), m_dataLength);
}
#endif // DUMP_AUDIO