Ticket #1104: mythtv_ac3.49.patch
File mythtv_ac3.49.patch, 126.4 KB (added 18 years ago)
configure
     libfaac
     libfaad
     libfaadbin
+    libfftw3
     libgsm
     libmp3lame
     libnut
…
 echo "libfaac enabled ${libfaac-no}"
 echo "libfaad enabled ${libfaad-no}"
 echo "libfaad dlopened ${libfaadbin-no}"
+echo "libfftw3 support ${liba52-no}"
 echo "libgsm enabled ${libgsm-no}"
 echo "libmp3lame enabled ${libmp3lame-no}"
 echo "libnut enabled ${libnut-no}"
libs/libs.pro
 # Directories
 SUBDIRS += libavutil libavcodec libavformat libmythsamplerate
 SUBDIRS += libmythsoundtouch libmythmpeg2 libmythdvdnav
+SUBDIRS += libmythfreesurround

 mingw : SUBDIRS += libmyth libmythupnp libmythui
 !mingw: SUBDIRS += libmythupnp libmythui libmyth
libs/libmyth/libmyth.pro
 # Input
 HEADERS += audiooutput.h audiooutputbase.h audiooutputnull.h
+HEADERS += audiooutputdigitalencoder.h
 HEADERS += backendselect.h dbsettings.h dialogbox.h
 HEADERS += DisplayRes.h DisplayResScreen.h exitcodes.h
 HEADERS += generictree.h httpcomms.h langsettings.h lcddevice.h
…
 HEADERS += volumebase.h volumecontrol.h virtualkeyboard.h visual.h xmlparse.h
 HEADERS += mythhdd.h mythcdrom.h storagegroup.h
 HEADERS += compat.h
+HEADERS += audiooutputdigitalencoder.h

 SOURCES += audiooutput.cpp audiooutputbase.cpp audiooutputnull.cpp
+SOURCES += audiooutputdigitalencoder.cpp
 SOURCES += backendselect.cpp dbsettings.cpp dialogbox.cpp
 SOURCES += DisplayRes.cpp DisplayResScreen.cpp
 SOURCES += generictree.cpp httpcomms.cpp langsettings.cpp lcddevice.cpp
…
 SOURCES += volumebase.cpp volumecontrol.cpp virtualkeyboard.cpp xmlparse.cpp
 SOURCES += mythhdd.cpp mythcdrom.cpp storagegroup.cpp

-INCLUDEPATH += ../libmythsamplerate ../libmythsoundtouch ../.. ../ ./
+INCLUDEPATH += ../libmythsamplerate ../libmythsoundtouch ../libmythfreesurround
+INCLUDEPATH += ../libavcodec ../libavutil
+INCLUDEPATH += ../.. ../ ./
 DEPENDPATH += ../libmythsamplerate ../libmythsoundtouch ../ ../libmythui
-DEPENDPATH += ../libmythupnp
+DEPENDPATH += ../libmythupnp ../libmythfreesurround ../libavcodec ../libavutil

-LIBS += -L../libmythsamplerate -lmythsamplerate-$${LIBVERSION}
-LIBS += -L../libmythsoundtouch -lmythsoundtouch-$${LIBVERSION}
-LIBS += -L../libmythui -lmythui-$${LIBVERSION}
-LIBS += -L../libmythupnp -lmythupnp-$${LIBVERSION}

+LIBS += -L../libmythsamplerate -lmythsamplerate-$${LIBVERSION}
+LIBS += -L../libmythsoundtouch -lmythsoundtouch-$${LIBVERSION}
+LIBS += -L../libmythui -lmythui-$${LIBVERSION}
+LIBS += -L../libmythupnp -lmythupnp-$${LIBVERSION}
+LIBS += -L../libmythfreesurround -lmythfreesurround-$${LIBVERSION}
+LIBS += -L../libavcodec -lmythavcodec-$${LIBVERSION}
+LIBS += -L../libavutil -lmythavutil-$${LIBVERSION}
+
 TARGETDEPS += ../libmythsamplerate/libmythsamplerate-$${MYTH_LIB_EXT}
 TARGETDEPS += ../libmythsoundtouch/libmythsoundtouch-$${MYTH_LIB_EXT}
+TARGETDEPS += ../libmythfreesurround/libmythfreesurround-$${MYTH_LIB_EXT}

 # Install headers so that plugins can compile independently
 inc.path = $${PREFIX}/include/mythtv/
…
 use_hidesyms {
     QMAKE_CXXFLAGS += -fvisibility=hidden
 }
+
+contains( CONFIG_LIBA52, yes ) {
+    LIBS += -la52
+}
+
+contains( CONFIG_LIBFFTW3, yes ) {
+    LIBS += -lfftw3f
+}
libs/libmyth/audiooutput.h
     virtual ~AudioOutput() { };

     // reconfigure sound out for new params
-    virtual void Reconfigure(int audio_bits, int audio_channels,
-                             int audio_samplerate, bool audio_passthru) = 0;
+    virtual void Reconfigure(int audio_bits,
+                             int audio_channels,
+                             int audio_samplerate,
+                             bool audio_passthru,
+                             void* audio_codec = NULL) = 0;

     virtual void SetStretchFactor(float factor);
+    virtual float GetStretchFactor(void) { return 1.0f; }

     // do AddSamples calls block?
     virtual void SetBlocking(bool blocking) = 0;
…
         lastError = msg;
         VERBOSE(VB_IMPORTANT, "AudioOutput Error: " + lastError);
     }
+    void ClearError(void) { lastError = QString::null; }

     void Warn(QString msg)
     {
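For context, a minimal caller-side sketch of the new Reconfigure() contract: the extra void* argument carries the decoder's AVCodecContext only when AC-3/DTS passthrough or re-encoding is wanted, otherwise it stays NULL and plain PCM is used. DecoderCtx and AudioOut below are stand-ins, not MythTV classes, and the field values in main() are placeholders.

// Hypothetical caller-side sketch of the patched Reconfigure() overload.
#include <cstddef>

struct DecoderCtx            // stand-in for AVCodecContext (assumption)
{
    int codec_id;
    int bit_rate;
    int sample_rate;
    int channels;
};

struct AudioOut              // stand-in for the AudioOutput interface
{
    // Mirrors the patched signature: the last argument carries the
    // decoder context only for the passthrough/re-encode path.
    void Reconfigure(int /*bits*/, int /*channels*/, int /*samplerate*/,
                     bool /*passthru*/, void * /*codec*/ = NULL)
    {
        // real work happens in AudioOutputBase::Reconfigure()
    }
};

void setup_audio(AudioOut &out, DecoderCtx *dec, bool want_digital_out)
{
    if (want_digital_out && dec)
    {
        // Passthrough/re-encode path: the patch forces the output side
        // to 16 bit / 2 ch / 48 kHz IEC958 frames when a codec is given.
        out.Reconfigure(16, 2, 48000, true, dec);
    }
    else
    {
        // Normal PCM path, no codec context attached.
        out.Reconfigure(16, dec ? dec->channels : 2,
                        dec ? dec->sample_rate : 48000, false);
    }
}

int main()
{
    AudioOut out;
    DecoderCtx ac3 = { 1 /*placeholder id*/, 384000, 48000, 6 };
    setup_audio(out, &ac3, true);    // digital passthrough/re-encode
    setup_audio(out, &ac3, false);   // plain PCM
    return 0;
}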
libs/libmyth/audiooutputdx.h
     /// END HACK HACK HACK HACK

     virtual void Reset(void);
-    virtual void Reconfigure(int audio_bits, int audio_channels,
-                             int audio_samplerate, int audio_passthru);
+    virtual void Reconfigure(int audio_bits,
+                             int audio_channels,
+                             int audio_samplerate,
+                             bool audio_passthru,
+                             AudioCodecMode aom = AUDIOCODECMODE_NORMAL);
     virtual void SetBlocking(bool blocking);

     virtual bool AddSamples(char *buffer, int samples, long long timecode);
libs/libmyth/audiooutputdx.cpp
     // FIXME: kedl: not sure what else could be required here?
 }

-void AudioOutputDX::Reconfigure(int audio_bits, int audio_channels,
-                                int audio_samplerate, int audio_passthru)
+void AudioOutputDX::Reconfigure(int audio_bits,
+                                int audio_channels,
+                                int audio_samplerate,
+                                int audio_passthru,
+                                AudioCodecMode laom)
 {
     if (dsbuffer)
         DestroyDSBuffer();
libs/libmyth/audiooutputbase.h
16 16 // MythTV headers 17 17 #include "audiooutput.h" 18 18 #include "samplerate.h" 19 #include "SoundTouch.h"20 19 21 #define AUDBUFSIZE 768000 20 namespace soundtouch { 21 class SoundTouch; 22 }; 23 class FreeSurround; 24 class AudioOutputDigitalEncoder; 25 struct AVCodecContext; 26 22 27 #define AUDIO_SRC_IN_SIZE 16384 23 28 #define AUDIO_SRC_OUT_SIZE (16384*6) 24 29 #define AUDIO_TMP_BUF_SIZE (16384*6) 25 30 31 //#define AUDBUFSIZE 768000 32 //divisible by 12,10,8,6,4,2 and around 1024000 33 //#define AUDBUFSIZE 1024080 34 #define AUDBUFSIZE 1536000 35 26 36 class AudioOutputBase : public AudioOutput 27 37 { 28 38 public: … … 35 45 virtual ~AudioOutputBase(); 36 46 37 47 // reconfigure sound out for new params 38 virtual void Reconfigure(int audio_bits, int audio_channels, 39 int audio_samplerate, bool audio_passthru); 48 virtual void Reconfigure(int audio_bits, 49 int audio_channels, 50 int audio_samplerate, 51 bool audio_passthru, 52 void* audio_codec = NULL); 40 53 41 54 // do AddSamples calls block? 42 55 virtual void SetBlocking(bool blocking); … … 45 58 virtual void SetEffDsp(int dsprate); 46 59 47 60 virtual void SetStretchFactor(float factor); 61 virtual float GetStretchFactor(void); 48 62 49 63 virtual void Reset(void); 50 64 … … 127 141 bool audio_passthru; 128 142 129 143 float audio_stretchfactor; 144 AVCodecContext *audio_codec; 130 145 AudioOutputSource source; 131 146 132 147 bool killaudio; … … 135 150 bool set_initial_vol; 136 151 bool buffer_output_data_for_use; // used by AudioOutputNULL 137 152 153 int configured_audio_channels; 154 138 155 private: 139 156 // resampler 140 157 bool need_resampler; … … 145 162 short tmp_buff[AUDIO_TMP_BUF_SIZE]; 146 163 147 164 // timestretch 148 soundtouch::SoundTouch * pSoundStretch; 165 soundtouch::SoundTouch *pSoundStretch; 166 AudioOutputDigitalEncoder *encoder; 167 FreeSurround *upmixer; 149 168 169 int source_audio_channels; 170 int source_audio_bytes_per_sample; 171 bool needs_upmix; 172 int surround_mode; 173 150 174 bool blocking; // do AddSamples calls block? 151 175 152 176 int lastaudiolen; … … 164 188 165 189 pthread_mutex_t avsync_lock; /* must hold avsync_lock to read or write 166 190 'audiotime' and 'audiotime_updated' */ 167 int audiotime; // timecode of audio leaving the soundcard (same units as168 // timecodes) ...191 /// timecode of audio leaving the soundcard (same units as timecodes) 192 long long audiotime; 169 193 struct timeval audiotime_updated; // ... which was last updated at this time 170 194 171 195 /* Audio circular buffer */ 172 196 unsigned char audiobuffer[AUDBUFSIZE]; /* buffer */ 173 197 int raud, waud; /* read and write positions */ 174 int audbuf_timecode; /* timecode of audio most recently placed into175 buffer */198 /// timecode of audio most recently placed into buffer 199 long long audbuf_timecode; 176 200 177 201 int numlowbuffer; 178 202 -
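The header keeps the audio in a single circular buffer (audiobuffer[AUDBUFSIZE], now 1536000 bytes) with raud/waud read and write offsets. A small standalone illustration of that bookkeeping follows; it is not the class itself, and the one-byte-reserved convention for distinguishing full from empty is an assumption.

// Minimal sketch of the ring-buffer arithmetic behind raud/waud.
#include <cstdio>

static const int kAudBufSize = 1536000;   // mirrors AUDBUFSIZE in the patch

// bytes currently queued between the read and write cursors
static int used_bytes(int raud, int waud)
{
    return (waud >= raud) ? (waud - raud)
                          : (kAudBufSize - raud + waud);
}

// bytes that can still be written without overtaking the read cursor
static int free_bytes(int raud, int waud)
{
    // keep one byte unused so "full" and "empty" stay distinguishable
    // (assumption; the real class may use a different convention)
    return kAudBufSize - used_bytes(raud, waud) - 1;
}

int main()
{
    int raud = 0, waud = 19200;           // e.g. one 6ch/16bit/48kHz fragment
    std::printf("used=%d free=%d\n",
                used_bytes(raud, waud), free_bytes(raud, waud));
    return 0;
}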
libs/libmyth/audiooutputbase.cpp
15 15 16 16 // MythTV headers 17 17 #include "audiooutputbase.h" 18 #include "audiooutputdigitalencoder.h" 19 #include "SoundTouch.h" 20 #include "freesurround.h" 18 21 #include "compat.h" 19 22 20 23 #define LOC QString("AO: ") … … 36 39 audio_passthru_device(QDeepCopy<QString>(laudio_passthru_device)), 37 40 audio_passthru(false), audio_stretchfactor(1.0f), 38 41 42 audio_codec(NULL), 39 43 source(lsource), killaudio(false), 40 44 41 45 pauseaudio(false), audio_actually_paused(false), … … 47 51 48 52 src_ctx(NULL), 49 53 50 pSoundStretch(NULL), blocking(false), 54 pSoundStretch(NULL), 55 encoder(NULL), 56 upmixer(NULL), 57 source_audio_channels(-1), 58 source_audio_bytes_per_sample(0), 59 needs_upmix(false), 60 surround_mode(FreeSurround::SurroundModePassive), 51 61 62 blocking(false), 63 52 64 lastaudiolen(0), samples_buffered(0), 53 65 54 66 audio_thread_exists(false), … … 71 83 memset(tmp_buff, 0, sizeof(short) * AUDIO_TMP_BUF_SIZE); 72 84 memset(&audiotime_updated, 0, sizeof(audiotime_updated)); 73 85 memset(audiobuffer, 0, sizeof(char) * AUDBUFSIZE); 86 configured_audio_channels = gContext->GetNumSetting("MaxChannels", 2); 74 87 75 88 // You need to call Reconfigure from your concrete class. 76 89 // Reconfigure(laudio_bits, laudio_channels, … … 111 124 VERBOSE(VB_GENERAL, LOC + QString("Using time stretch %1") 112 125 .arg(audio_stretchfactor)); 113 126 pSoundStretch = new soundtouch::SoundTouch(); 114 pSoundStretch->setSampleRate(audio_samplerate); 115 pSoundStretch->setChannels(audio_channels); 127 if (audio_codec) 128 { 129 if (!encoder) 130 { 131 VERBOSE(VB_AUDIO, LOC + 132 QString("Creating Encoder for codec %1 origfs %2") 133 .arg(audio_codec->codec_id) 134 .arg(audio_codec->frame_size)); 116 135 136 encoder = new AudioOutputDigitalEncoder(); 137 if (!encoder->Init(audio_codec->codec_id, 138 audio_codec->bit_rate, 139 audio_codec->sample_rate, 140 audio_codec->channels 141 )) 142 { 143 // eeks 144 delete encoder; 145 encoder = NULL; 146 VERBOSE(VB_AUDIO, LOC + 147 QString("Failed to Create Encoder")); 148 } 149 } 150 } 151 if (encoder) 152 { 153 pSoundStretch->setSampleRate(audio_codec->sample_rate); 154 pSoundStretch->setChannels(audio_codec->channels); 155 } 156 else 157 { 158 pSoundStretch->setSampleRate(audio_samplerate); 159 pSoundStretch->setChannels(audio_channels); 160 } 161 117 162 pSoundStretch->setTempo(audio_stretchfactor); 118 163 pSoundStretch->setSetting(SETTING_SEQUENCE_MS, 35); 119 164 … … 134 179 pthread_mutex_unlock(&audio_buflock); 135 180 } 136 181 182 float AudioOutputBase::GetStretchFactor() 183 { 184 return audio_stretchfactor; 185 } 186 137 187 void AudioOutputBase::Reconfigure(int laudio_bits, int laudio_channels, 138 int laudio_samplerate, bool laudio_passthru) 188 int laudio_samplerate, bool laudio_passthru, 189 void* laudio_codec) 139 190 { 140 if (laudio_bits == audio_bits && laudio_channels == audio_channels && 141 laudio_samplerate == audio_samplerate && 142 laudio_passthru == audio_passthru && !need_resampler) 191 int codec_id = CODEC_ID_NONE; 192 int lcodec_id = CODEC_ID_NONE; 193 int lcchannels = 0; 194 int cchannels = 0; 195 int lsource_audio_channels = laudio_channels; 196 bool lneeds_upmix = false; 197 198 if (laudio_codec) 199 { 200 lcodec_id = ((AVCodecContext*)laudio_codec)->codec_id; 201 laudio_bits = 16; 202 laudio_channels = 2; 203 lsource_audio_channels = laudio_channels; 204 laudio_samplerate = 48000; 205 lcchannels = ((AVCodecContext*)laudio_codec)->channels; 206 } 207 208 if (audio_codec) 209 { 210 codec_id = 
audio_codec->codec_id; 211 cchannels = ((AVCodecContext*)audio_codec)->channels; 212 } 213 214 if ((configured_audio_channels == 6) && 215 !(laudio_codec || audio_codec)) 216 { 217 laudio_channels = configured_audio_channels; 218 lneeds_upmix = true; 219 VERBOSE(VB_AUDIO,LOC + "Needs upmix"); 220 } 221 222 ClearError(); 223 bool general_deps = (laudio_bits == audio_bits && 224 laudio_channels == audio_channels && 225 laudio_samplerate == audio_samplerate && !need_resampler && 226 laudio_passthru == audio_passthru && 227 lneeds_upmix == needs_upmix && 228 lcodec_id == codec_id && lcchannels == cchannels); 229 bool upmix_deps = 230 (lsource_audio_channels == source_audio_channels); 231 if (general_deps && upmix_deps) 232 { 233 VERBOSE(VB_AUDIO,LOC + "no change exiting"); 143 234 return; 235 } 144 236 237 if (general_deps && !upmix_deps && lneeds_upmix && upmixer) 238 { 239 upmixer->flush(); 240 source_audio_channels = lsource_audio_channels; 241 VERBOSE(VB_AUDIO,LOC + QString("source channels changed to %1").arg(source_audio_channels)); 242 return; 243 } 244 145 245 KillAudio(); 146 246 147 247 pthread_mutex_lock(&audio_buflock); … … 151 251 waud = raud = 0; 152 252 audio_actually_paused = false; 153 253 254 bool redo_stretch = (pSoundStretch && audio_channels != laudio_channels); 154 255 audio_channels = laudio_channels; 256 source_audio_channels = lsource_audio_channels; 155 257 audio_bits = laudio_bits; 156 258 audio_samplerate = laudio_samplerate; 259 audio_codec = (AVCodecContext*)laudio_codec; 157 260 audio_passthru = laudio_passthru; 261 needs_upmix = lneeds_upmix; 262 158 263 if (audio_bits != 8 && audio_bits != 16) 159 264 { 160 265 pthread_mutex_unlock(&avsync_lock); … … 162 267 Error("AudioOutput only supports 8 or 16bit audio."); 163 268 return; 164 269 } 270 165 271 audio_bytes_per_sample = audio_channels * audio_bits / 8; 272 source_audio_bytes_per_sample = source_audio_channels * audio_bits / 8; 166 273 167 274 need_resampler = false; 168 275 killaudio = false; … … 172 279 173 280 numlowbuffer = 0; 174 281 282 VERBOSE(VB_GENERAL, QString("Opening audio device '%1'. 
ch %2(%3) sr %4") 283 .arg(audio_main_device).arg(audio_channels) 284 .arg(source_audio_channels).arg(audio_samplerate)); 285 175 286 // Actually do the device specific open call 176 287 if (!OpenDevice()) 177 288 { 178 289 VERBOSE(VB_AUDIO, LOC_ERR + "Aborting reconfigure"); 179 290 pthread_mutex_unlock(&avsync_lock); 180 291 pthread_mutex_unlock(&audio_buflock); 292 if (GetError().isEmpty()) 293 Error("Aborting reconfigure"); 294 VERBOSE(VB_AUDIO, "Aborting reconfigure"); 181 295 return; 182 296 } 183 297 … … 200 314 current_seconds = -1; 201 315 source_bitrate = -1; 202 316 317 // NOTE: this won't do anything as above samplerate vars are set equal 203 318 // Check if we need the resampler 204 319 if (audio_samplerate != laudio_samplerate) 205 320 { … … 222 337 need_resampler = true; 223 338 } 224 339 340 if (needs_upmix) 341 { 342 VERBOSE(VB_AUDIO, LOC + QString("create upmixer")); 343 if (configured_audio_channels == 6) 344 { 345 surround_mode = gContext->GetNumSetting("AudioUpmixType", 2); 346 } 347 348 upmixer = new FreeSurround( 349 audio_samplerate, 350 source == AUDIOOUTPUT_VIDEO, 351 (FreeSurround::SurroundMode)surround_mode); 352 353 VERBOSE(VB_AUDIO, LOC + 354 QString("create upmixer done with surround mode %1") 355 .arg(surround_mode)); 356 } 357 225 358 VERBOSE(VB_AUDIO, LOC + QString("Audio Stretch Factor: %1") 226 359 .arg(audio_stretchfactor)); 360 VERBOSE(VB_AUDIO, QString("Audio Codec Used: %1") 361 .arg((audio_codec) ? 362 codec_id_string(audio_codec->codec_id) : "not set")); 227 363 228 SetStretchFactorLocked(audio_stretchfactor); 229 if (pSoundStretch) 364 if (redo_stretch) 230 365 { 231 pSoundStretch->setSampleRate(audio_samplerate); 232 pSoundStretch->setChannels(audio_channels); 366 float laudio_stretchfactor = audio_stretchfactor; 367 delete pSoundStretch; 368 pSoundStretch = NULL; 369 audio_stretchfactor = 0.0f; 370 SetStretchFactorLocked(laudio_stretchfactor); 233 371 } 372 else 373 { 374 SetStretchFactorLocked(audio_stretchfactor); 375 if (pSoundStretch) 376 { 377 // if its passthru then we need to reencode 378 if (audio_codec) 379 { 380 if (!encoder) 381 { 382 VERBOSE(VB_AUDIO, LOC + 383 QString("Creating Encoder for codec %1") 384 .arg(audio_codec->codec_id)); 234 385 386 encoder = new AudioOutputDigitalEncoder(); 387 if (!encoder->Init(audio_codec->codec_id, 388 audio_codec->bit_rate, 389 audio_codec->sample_rate, 390 audio_codec->channels 391 )) 392 { 393 // eeks 394 delete encoder; 395 encoder = NULL; 396 VERBOSE(VB_AUDIO, LOC + "Failed to Create Encoder"); 397 } 398 } 399 } 400 if (encoder) 401 { 402 pSoundStretch->setSampleRate(audio_codec->sample_rate); 403 pSoundStretch->setChannels(audio_codec->channels); 404 } 405 else 406 { 407 pSoundStretch->setSampleRate(audio_samplerate); 408 pSoundStretch->setChannels(audio_channels); 409 } 410 } 411 } 412 235 413 // Setup visualisations, zero the visualisations buffers 236 414 prepareVisuals(); 237 415 … … 290 468 pSoundStretch = NULL; 291 469 } 292 470 471 if (encoder) 472 { 473 delete encoder; 474 encoder = NULL; 475 } 476 477 if (upmixer) 478 { 479 delete upmixer; 480 upmixer = NULL; 481 } 482 needs_upmix = false; 483 293 484 CloseDevice(); 294 485 295 486 killAudioLock.unlock(); … … 303 494 304 495 void AudioOutputBase::Pause(bool paused) 305 496 { 497 VERBOSE(VB_AUDIO, LOC + QString("Pause %0").arg(paused)); 306 498 pauseaudio = paused; 307 499 audio_actually_paused = false; 308 500 } … … 385 577 The reason is that computing 'audiotime' requires acquiring the audio 386 578 lock, which the video thread 
should not do. So, we call 'SetAudioTime()' 387 579 from the audio thread, and then call this from the video thread. */ 388 intret;580 long long ret; 389 581 struct timeval now; 390 582 391 583 if (audiotime == 0) … … 397 589 398 590 ret = (now.tv_sec - audiotime_updated.tv_sec) * 1000; 399 591 ret += (now.tv_usec - audiotime_updated.tv_usec) / 1000; 400 ret = ( int)(ret * audio_stretchfactor);592 ret = (long long)(ret * audio_stretchfactor); 401 593 594 #if 1 595 VERBOSE(VB_AUDIO|VB_TIMESTAMP, 596 QString("GetAudiotime now=%1.%2, set=%3.%4, ret=%5, audt=%6 sf=%7") 597 .arg(now.tv_sec).arg(now.tv_usec) 598 .arg(audiotime_updated.tv_sec).arg(audiotime_updated.tv_usec) 599 .arg(ret) 600 .arg(audiotime) 601 .arg(audio_stretchfactor) 602 ); 603 #endif 604 402 605 ret += audiotime; 403 606 404 607 pthread_mutex_unlock(&avsync_lock); 405 return ret;608 return (int)ret; 406 609 } 407 610 408 611 void AudioOutputBase::SetAudiotime(void) … … 439 642 // include algorithmic latencies 440 643 if (pSoundStretch) 441 644 { 645 // add the effect of any unused but processed samples, 646 // AC3 reencode does this 647 totalbuffer += (int)(pSoundStretch->numSamples() * 648 audio_bytes_per_sample); 442 649 // add the effect of unprocessed samples in time stretch algo 443 650 totalbuffer += (int)((pSoundStretch->numUnprocessedSamples() * 444 651 audio_bytes_per_sample) / audio_stretchfactor); 445 652 } 446 653 654 if (upmixer && needs_upmix) 655 { 656 totalbuffer += upmixer->sampleLatency() * audio_bytes_per_sample; 657 } 658 447 659 audiotime = audbuf_timecode - (int)(totalbuffer * 100000.0 / 448 660 (audio_bytes_per_sample * effdspstretched)); 449 661 450 662 gettimeofday(&audiotime_updated, NULL); 663 #if 1 664 VERBOSE(VB_AUDIO|VB_TIMESTAMP, 665 QString("SetAudiotime set=%1.%2, audt=%3 atc=%4 " 666 "tb=%5 sb=%6 eds=%7 abps=%8 sf=%9") 667 .arg(audiotime_updated.tv_sec).arg(audiotime_updated.tv_usec) 668 .arg(audiotime) 669 .arg(audbuf_timecode) 670 .arg(totalbuffer) 671 .arg(soundcard_buffer) 672 .arg(effdspstretched) 673 .arg(audio_bytes_per_sample) 674 .arg(audio_stretchfactor) 675 ); 676 #endif 451 677 452 678 pthread_mutex_unlock(&avsync_lock); 453 679 pthread_mutex_unlock(&audio_buflock); … … 458 684 { 459 685 // NOTE: This function is not threadsafe 460 686 int afree = audiofree(true); 461 int abps = audio_bytes_per_sample; 687 int abps = (encoder) ? 688 encoder->audio_bytes_per_sample : audio_bytes_per_sample; 462 689 int len = samples * abps; 463 690 464 691 // Check we have enough space to write the data 465 692 if (need_resampler && src_ctx) 466 693 len = (int)ceilf(float(len) * src_data.src_ratio); 467 694 695 // include samples in upmix buffer that may be flushed 696 if (needs_upmix && upmixer) 697 len += upmixer->numUnprocessedSamples() * abps; 698 468 699 if (pSoundStretch) 469 700 len += (pSoundStretch->numUnprocessedSamples() + 470 701 (int)(pSoundStretch->numSamples()/audio_stretchfactor))*abps; … … 520 751 // NOTE: This function is not threadsafe 521 752 522 753 int afree = audiofree(true); 523 int abps = audio_bytes_per_sample; 754 int abps = (encoder) ? 
755 encoder->audio_bytes_per_sample : audio_bytes_per_sample; 524 756 int len = samples * abps; 525 757 526 758 // Check we have enough space to write the data 527 759 if (need_resampler && src_ctx) 528 760 len = (int)ceilf(float(len) * src_data.src_ratio); 529 761 762 // include samples in upmix buffer that may be flushed 763 if (needs_upmix && upmixer) 764 len += upmixer->numUnprocessedSamples() * abps; 765 530 766 if (pSoundStretch) 531 767 { 532 768 len += (pSoundStretch->numUnprocessedSamples() + … … 575 811 576 812 int AudioOutputBase::WaitForFreeSpace(int samples) 577 813 { 578 int len = samples * audio_bytes_per_sample; 814 int abps = (encoder) ? 815 encoder->audio_bytes_per_sample : audio_bytes_per_sample; 816 int len = samples * abps; 579 817 int afree = audiofree(false); 580 818 581 819 while (len > afree) 582 820 { 583 821 if (blocking) 584 822 { 585 VERBOSE(VB_AUDIO , LOC + "Waiting for free space " +823 VERBOSE(VB_AUDIO|VB_TIMESTAMP, LOC + "Waiting for free space " + 586 824 QString("(need %1, available %2)").arg(len).arg(afree)); 587 825 588 826 // wait for more space … … 591 829 } 592 830 else 593 831 { 594 VERBOSE(VB_IMPORTANT, LOC_ERR + 595 "Audio buffer overflow, audio data lost!"); 596 samples = afree / audio_bytes_per_sample; 597 len = samples * audio_bytes_per_sample; 832 VERBOSE(VB_IMPORTANT, LOC_ERR + 833 QString("Audio buffer overflow, %1 audio samples lost!") 834 .arg(samples - (afree / abps))); 835 samples = afree / abps; 836 len = samples * abps; 598 837 if (src_ctx) 599 838 { 600 839 int error = src_reset(src_ctx); … … 619 858 620 859 int afree = audiofree(false); 621 860 622 VERBOSE(VB_AUDIO|VB_TIMESTAMP, 623 LOC + QString("_AddSamples bytes=%1, used=%2, free=%3, timecode=%4") 624 .arg(samples * audio_bytes_per_sample) 625 .arg(AUDBUFSIZE-afree).arg(afree).arg((long)timecode)); 861 int abps = (encoder) ? 862 encoder->audio_bytes_per_sample : audio_bytes_per_sample; 863 864 VERBOSE(VB_AUDIO|VB_TIMESTAMP, 865 LOC + QString("_AddSamples samples=%1 bytes=%2, used=%3, " 866 "free=%4, timecode=%5 needsupmix %6") 867 .arg(samples) 868 .arg(samples * abps) 869 .arg(AUDBUFSIZE-afree).arg(afree).arg(timecode) 870 .arg(needs_upmix)); 626 871 627 len = WaitForFreeSpace(samples); 628 629 if (interleaved) 872 if (upmixer && needs_upmix) 630 873 { 631 char *mybuf = (char*)buffer; 632 int bdiff = AUDBUFSIZE - org_waud; 633 if (bdiff < len) 874 int out_samples = 0; 875 int step = (interleaved)?source_audio_channels:1; 876 len = WaitForFreeSpace(samples); // test 877 for (int itemp = 0; itemp < samples; ) 634 878 { 635 memcpy(audiobuffer + org_waud, mybuf, bdiff); 636 memcpy(audiobuffer, mybuf + bdiff, len - bdiff); 879 // just in case it does a processing cycle, release the lock 880 // to allow the output loop to do output 881 pthread_mutex_unlock(&audio_buflock); 882 if (audio_bytes == 2) 883 { 884 itemp += upmixer->putSamples( 885 (short*)buffer + itemp * step, 886 samples - itemp, 887 source_audio_channels, 888 (interleaved) ? 0 : samples); 889 } 890 else 891 { 892 itemp += upmixer->putSamples( 893 (char*)buffer + itemp * step, 894 samples - itemp, 895 source_audio_channels, 896 (interleaved) ? 
0 : samples); 897 } 898 pthread_mutex_lock(&audio_buflock); 899 900 int copy_samples = upmixer->numSamples(); 901 if (copy_samples) 902 { 903 int copy_len = copy_samples * abps; 904 out_samples += copy_samples; 905 if (out_samples > samples) 906 len = WaitForFreeSpace(out_samples); 907 int bdiff = AUDBUFSIZE - org_waud; 908 if (bdiff < copy_len) 909 { 910 int bdiff_samples = bdiff/abps; 911 upmixer->receiveSamples( 912 (short*)(audiobuffer + org_waud), bdiff_samples); 913 upmixer->receiveSamples( 914 (short*)(audiobuffer), (copy_samples - bdiff_samples)); 915 } 916 else 917 { 918 upmixer->receiveSamples( 919 (short*)(audiobuffer + org_waud), copy_samples); 920 } 921 org_waud = (org_waud + copy_len) % AUDBUFSIZE; 922 } 637 923 } 638 else 639 memcpy(audiobuffer + org_waud, mybuf, len); 640 641 org_waud = (org_waud + len) % AUDBUFSIZE; 642 } 643 else 924 925 if (samples > 0) 926 len = WaitForFreeSpace(out_samples); 927 928 samples = out_samples; 929 } 930 else 644 931 { 645 char **mybuf = (char**)buffer; 646 for (int itemp = 0; itemp < samples * audio_bytes; itemp += audio_bytes) 932 len = WaitForFreeSpace(samples); 933 934 if (interleaved) 647 935 { 648 for (int chan = 0; chan < audio_channels; chan++) 936 char *mybuf = (char*)buffer; 937 int bdiff = AUDBUFSIZE - org_waud; 938 if (bdiff < len) 649 939 { 650 audiobuffer[org_waud++] = mybuf[chan][itemp]; 651 if (audio_bits == 16) 652 audiobuffer[org_waud++] = mybuf[chan][itemp+1]; 940 memcpy(audiobuffer + org_waud, mybuf, bdiff); 941 memcpy(audiobuffer, mybuf + bdiff, len - bdiff); 942 } 943 else 944 { 945 memcpy(audiobuffer + org_waud, mybuf, len); 946 } 947 948 org_waud = (org_waud + len) % AUDBUFSIZE; 949 } 950 else 951 { 952 char **mybuf = (char**)buffer; 953 for (int itemp = 0; itemp < samples * audio_bytes; 954 itemp += audio_bytes) 955 { 956 for (int chan = 0; chan < audio_channels; chan++) 957 { 958 audiobuffer[org_waud++] = mybuf[chan][itemp]; 959 if (audio_bits == 16) 960 audiobuffer[org_waud++] = mybuf[chan][itemp+1]; 653 961 654 if (org_waud >= AUDBUFSIZE) 655 org_waud -= AUDBUFSIZE; 962 if (org_waud >= AUDBUFSIZE) 963 org_waud -= AUDBUFSIZE; 964 } 656 965 } 657 966 } 658 967 } 659 968 660 if ( pSoundStretch)969 if (samples > 0) 661 970 { 662 // does not change the timecode, only the number of samples 663 // back to orig pos 664 org_waud = waud; 665 int bdiff = AUDBUFSIZE - org_waud; 666 int nSamplesToEnd = bdiff/audio_bytes_per_sample; 667 if (bdiff < len) 971 if (pSoundStretch) 668 972 { 669 pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)(audiobuffer +670 org_waud), nSamplesToEnd);671 pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)audiobuffer,672 (len - bdiff) / audio_bytes_per_sample);673 }674 else675 {676 pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)(audiobuffer +677 org_waud), len / audio_bytes_per_sample);678 }679 973 680 int newLen = 0; 681 int nSamples; 682 len = WaitForFreeSpace(pSoundStretch->numSamples() * 683 audio_bytes_per_sample); 684 do 685 { 686 int samplesToGet = len/audio_bytes_per_sample; 687 if (samplesToGet > nSamplesToEnd) 974 // does not change the timecode, only the number of samples 975 // back to orig pos 976 org_waud = waud; 977 int bdiff = AUDBUFSIZE - org_waud; 978 int nSamplesToEnd = bdiff/abps; 979 if (bdiff < len) 688 980 { 689 samplesToGet = nSamplesToEnd; 981 pSoundStretch->putSamples((soundtouch::SAMPLETYPE*) 982 (audiobuffer + 983 org_waud), nSamplesToEnd); 984 pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)audiobuffer, 985 (len - bdiff) / abps); 690 986 } 987 else 988 { 989 
pSoundStretch->putSamples((soundtouch::SAMPLETYPE*) 990 (audiobuffer + org_waud), 991 len / abps); 992 } 691 993 692 nSamples = pSoundStretch->receiveSamples((soundtouch::SAMPLETYPE*) 693 (audiobuffer + org_waud), samplesToGet); 694 if (nSamples == nSamplesToEnd) 994 if (encoder) 695 995 { 696 org_waud = 0; 697 nSamplesToEnd = AUDBUFSIZE/audio_bytes_per_sample; 996 // pull out a packet's worth and reencode it until we 997 // don't have enough for any more packets 998 soundtouch::SAMPLETYPE *temp_buff = 999 (soundtouch::SAMPLETYPE*)encoder->GetFrameBuffer(); 1000 size_t frameSize = encoder->FrameSize()/abps; 1001 1002 VERBOSE(VB_AUDIO|VB_TIMESTAMP, 1003 QString("_AddSamples Enc sfs=%1 bfs=%2 sss=%3") 1004 .arg(frameSize) 1005 .arg(encoder->FrameSize()) 1006 .arg(pSoundStretch->numSamples())); 1007 1008 // process the same number of samples as it creates 1009 // a full encoded buffer just like before 1010 while (pSoundStretch->numSamples() >= frameSize) 1011 { 1012 int got = pSoundStretch->receiveSamples( 1013 temp_buff, frameSize); 1014 int amount = encoder->Encode(temp_buff); 1015 1016 VERBOSE(VB_AUDIO|VB_TIMESTAMP, 1017 QString("_AddSamples Enc bytes=%1 got=%2 left=%3") 1018 .arg(amount) 1019 .arg(got) 1020 .arg(pSoundStretch->numSamples())); 1021 1022 if (!amount) 1023 continue; 1024 1025 //len = WaitForFreeSpace(amount); 1026 char *ob = encoder->GetOutBuff(); 1027 if (amount >= bdiff) 1028 { 1029 memcpy(audiobuffer + org_waud, ob, bdiff); 1030 ob += bdiff; 1031 amount -= bdiff; 1032 org_waud = 0; 1033 } 1034 if (amount > 0) 1035 memcpy(audiobuffer + org_waud, ob, amount); 1036 1037 bdiff = AUDBUFSIZE - amount; 1038 org_waud += amount; 1039 } 698 1040 } 699 1041 else 700 1042 { 701 org_waud += nSamples * audio_bytes_per_sample; 702 nSamplesToEnd -= nSamples; 1043 int newLen = 0; 1044 int nSamples; 1045 len = WaitForFreeSpace(pSoundStretch->numSamples() * 1046 audio_bytes_per_sample); 1047 do 1048 { 1049 int samplesToGet = len/audio_bytes_per_sample; 1050 if (samplesToGet > nSamplesToEnd) 1051 { 1052 samplesToGet = nSamplesToEnd; 1053 } 1054 1055 nSamples = pSoundStretch->receiveSamples( 1056 (soundtouch::SAMPLETYPE*) 1057 (audiobuffer + org_waud), samplesToGet); 1058 if (nSamples == nSamplesToEnd) 1059 { 1060 org_waud = 0; 1061 nSamplesToEnd = AUDBUFSIZE/audio_bytes_per_sample; 1062 } 1063 else 1064 { 1065 org_waud += nSamples * audio_bytes_per_sample; 1066 nSamplesToEnd -= nSamples; 1067 } 1068 1069 newLen += nSamples * audio_bytes_per_sample; 1070 len -= nSamples * audio_bytes_per_sample; 1071 } while (nSamples > 0); 703 1072 } 1073 } 704 1074 705 newLen += nSamples * audio_bytes_per_sample; 706 len -= nSamples * audio_bytes_per_sample; 707 } while (nSamples > 0); 708 } 1075 waud = org_waud; 1076 lastaudiolen = audiolen(false); 709 1077 710 waud = org_waud; 711 lastaudiolen = audiolen(false); 1078 if (timecode < 0) 1079 { 1080 // mythmusic doesn't give timestamps.. 1081 timecode = (int)((samples_buffered * 100000.0) / effdsp); 1082 } 1083 1084 samples_buffered += samples; 1085 1086 /* we want the time at the end -- but the file format stores 1087 time at the start of the chunk. */ 1088 // even with timestretch, timecode is still calculated from original 1089 // sample count 1090 audbuf_timecode = timecode + (int)((samples * 100000.0) / effdsp); 712 1091 713 samples_buffered += samples; 714 715 if (timecode < 0) 716 { 717 // mythmusic doesn't give timestamps.. 
718 timecode = (int)((samples_buffered * 100000.0) / effdsp); 1092 if (interleaved) 1093 { 1094 dispatchVisual((unsigned char *)buffer, len, timecode, 1095 source_audio_channels, audio_bits); 1096 } 719 1097 } 720 721 /* we want the time at the end -- but the file format stores722 time at the start of the chunk. */723 // even with timestretch, timecode is still calculated from original724 // sample count725 audbuf_timecode = timecode + (int)((samples * 100000.0) / effdsp);726 1098 727 if (interleaved)728 dispatchVisual((unsigned char *)buffer, len, timecode, audio_channels, audio_bits);729 730 1099 pthread_mutex_unlock(&audio_buflock); 731 1100 } 732 1101 … … 739 1108 740 1109 if (source_bitrate == -1) 741 1110 { 742 source_bitrate = audio_samplerate * audio_channels * audio_bits;1111 source_bitrate = audio_samplerate * source_audio_channels * audio_bits; 743 1112 } 744 1113 745 1114 if (ct / 1000 != current_seconds) … … 747 1116 current_seconds = ct / 1000; 748 1117 OutputEvent e(current_seconds, ct, 749 1118 source_bitrate, audio_samplerate, audio_bits, 750 audio_channels);1119 source_audio_channels); 751 1120 dispatch(e); 752 1121 } 753 1122 } … … 785 1154 786 1155 space_on_soundcard = getSpaceOnSoundcard(); 787 1156 788 if (space_on_soundcard != last_space_on_soundcard) { 789 VERBOSE(VB_AUDIO, LOC + QString("%1 bytes free on soundcard") 1157 if (space_on_soundcard != last_space_on_soundcard) 1158 { 1159 VERBOSE(VB_AUDIO|VB_TIMESTAMP, 1160 LOC + QString("%1 bytes free on soundcard") 790 1161 .arg(space_on_soundcard)); 1162 791 1163 last_space_on_soundcard = space_on_soundcard; 792 1164 } 793 1165 … … 799 1171 WriteAudio(zeros, fragment_size); 800 1172 } else { 801 1173 // this should never happen now -dag 802 VERBOSE(VB_AUDIO , LOC +1174 VERBOSE(VB_AUDIO|VB_TIMESTAMP, LOC + 803 1175 QString("waiting for space on soundcard " 804 1176 "to write zeros: have %1 need %2") 805 1177 .arg(space_on_soundcard).arg(fragment_size)); … … 835 1207 if (fragment_size > audiolen(true)) 836 1208 { 837 1209 if (audiolen(true) > 0) // only log if we're sending some audio 838 VERBOSE(VB_AUDIO , LOC +1210 VERBOSE(VB_AUDIO|VB_TIMESTAMP, LOC + 839 1211 QString("audio waiting for buffer to fill: " 840 1212 "have %1 want %2") 841 1213 .arg(audiolen(true)).arg(fragment_size)); 842 1214 843 VERBOSE(VB_AUDIO, LOC + "Broadcasting free space avail"); 1215 //VERBOSE(VB_AUDIO|VB_TIMESTAMP, 1216 //LOC + "Broadcasting free space avail"); 844 1217 pthread_mutex_lock(&audio_buflock); 845 1218 pthread_cond_broadcast(&audio_bufsig); 846 1219 pthread_mutex_unlock(&audio_buflock); … … 854 1227 if (fragment_size > space_on_soundcard) 855 1228 { 856 1229 if (space_on_soundcard != last_space_on_soundcard) { 857 VERBOSE(VB_AUDIO , LOC +1230 VERBOSE(VB_AUDIO|VB_TIMESTAMP, LOC + 858 1231 QString("audio waiting for space on soundcard: " 859 1232 "have %1 need %2") 860 1233 .arg(space_on_soundcard).arg(fragment_size)); … … 916 1289 917 1290 /* update raud */ 918 1291 raud = (raud + fragment_size) % AUDBUFSIZE; 919 VERBOSE(VB_AUDIO, LOC + "Broadcasting free space avail");1292 //VERBOSE(VB_AUDIO|VB_TIMESTAMP, LOC + "Broadcasting free space avail"); 920 1293 pthread_cond_broadcast(&audio_bufsig); 921 1294 922 1295 written_size = fragment_size; -
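The core of the new passthrough path in _AddSamples() is the drain loop that pulls whole encoder frames out of SoundTouch, re-encodes them, and copies the IEC958 bytes into the ring buffer with wrap-around handling. A reduced sketch follows; StretchStub and EncoderStub are stand-ins for soundtouch::SoundTouch and AudioOutputDigitalEncoder, cut down to the calls used here, and the frame/burst sizes are example values.

// Sketch of the re-encode drain loop added by the patch.
#include <cstring>
#include <vector>

struct StretchStub {                       // stand-in for SoundTouch
    std::vector<short> fifo;
    size_t numSamples() const { return fifo.size(); }
    size_t receiveSamples(short *dst, size_t n) {
        n = n < fifo.size() ? n : fifo.size();
        std::memcpy(dst, fifo.data(), n * sizeof(short));
        fifo.erase(fifo.begin(), fifo.begin() + n);
        return n;
    }
};

struct EncoderStub {                       // stand-in for the digital encoder
    enum { kFrameSamples = 1536 * 2 };     // one AC-3 frame, stereo interleaved
    short  frame[kFrameSamples];
    char   out[6144];
    short *GetFrameBuffer() { return frame; }
    size_t FrameSize() const { return kFrameSamples; }
    int    Encode(short *) { return 6144; }   // pretend a full burst came out
    char  *GetOutBuff() { return out; }
};

void drain_encoder(StretchStub &st, EncoderStub &enc,
                   unsigned char *ring, int ring_size, int &waud)
{
    const size_t frameSize = enc.FrameSize();
    while (st.numSamples() >= frameSize)    // only whole encoder frames
    {
        st.receiveSamples(enc.GetFrameBuffer(), frameSize);
        int amount = enc.Encode(enc.GetFrameBuffer());
        if (!amount)
            continue;                       // encoder is still buffering

        char *ob = enc.GetOutBuff();
        int bdiff = ring_size - waud;       // room before the wrap point
        if (amount >= bdiff)
        {
            std::memcpy(ring + waud, ob, bdiff);
            ob += bdiff;
            amount -= bdiff;
            waud = 0;
        }
        if (amount > 0)
            std::memcpy(ring + waud, ob, amount);
        waud += amount;
    }
}

int main()
{
    StretchStub st;
    st.fifo.resize(4 * EncoderStub::kFrameSamples);   // pretend 4 frames queued
    EncoderStub enc;
    std::vector<unsigned char> ring(1536000);
    int waud = 0;
    drain_encoder(st, enc, ring.data(), (int)ring.size(), waud);
    return 0;
}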
libs/libmyth/audiooutputalsa.cpp
     }
     else
     {
-        fragment_size = 6144; // nicely divisible by 2,4,6,8 channels @ 16-bits
-        buffer_time = 500000; // .5 seconds
+        fragment_size =
+            (audio_bits * audio_channels * audio_samplerate) / (8*30);
+        buffer_time = 100000;
         period_time = buffer_time / 4;  // 4 interrupts per buffer
     }

…

     tmpbuf = aubuf;

-    VERBOSE(VB_AUDIO, QString("WriteAudio: Preparing %1 bytes (%2 frames)")
+    VERBOSE(VB_AUDIO|VB_TIMESTAMP,
+            QString("WriteAudio: Preparing %1 bytes (%2 frames)")
             .arg(size).arg(frames));

     while (frames > 0)
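The replacement fragment_size formula sizes one ALSA period to 1/30 second of audio instead of the fixed 6144 bytes; a quick worked example with the formula taken straight from the hunk above:

// Worked example of the new ALSA fragment size.
#include <cstdio>

int main()
{
    int audio_bits       = 16;
    int audio_channels   = 6;
    int audio_samplerate = 48000;

    // bits -> bytes (/8), and 1/30 s worth of samples (/30)
    int fragment_size =
        (audio_bits * audio_channels * audio_samplerate) / (8 * 30);

    std::printf("fragment_size = %d bytes\n", fragment_size);  // 19200
    return 0;
}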
libs/libmyth/audiooutputdigitalencoder.cpp
209 209 extern "C" int ac3_sync(const uint8_t *buf, int *channels, int *sample_rate, 210 210 int *bit_rate, int *samples); 211 211 212 // from http://www.ebu.ch/CMSimages/en/tec_AES-EBU_eg_tcm6-11890.pdf 213 // http://en.wikipedia.org/wiki/S/PDIF 214 typedef struct { 215 // byte 0 216 unsigned professional_consumer:1; 217 unsigned non_data:1; 218 // 4 - no emphasis 219 // 6 - 50/15us 220 // 7 - CCITT J17 221 unsigned audio_signal_emphasis:3; 222 unsigned SSFL:1; 223 // 0 224 // 1 - 48k 225 // 2 - 44.1k 226 // 3 - 32k 227 unsigned sample_frequency:2; 228 // byte 1 229 // 0 230 // 1 - 2 ch 231 // 2 - mono 232 // 3 - prim/sec 233 // 4 - stereo 234 unsigned channel_mode:4; 235 // 0 236 // 1 - 192 bit block 237 // 2 - AES18 238 // 3 - user def 239 unsigned user_bit_management:4; 240 // byte 2 241 // 1 - audio data 242 // 2 - co-ordn 243 unsigned auxiliary_bits:3; 244 // 4 - 16 bits 245 // 5-7 - redither to 16 bits 246 unsigned source_word_length:3; 247 unsigned reserved:2; 248 // byte 3 249 unsigned multi_channel_function_description:8; 250 // byte 4 251 unsigned digital_audio_reference_signal:2; 252 unsigned reserved2:6; 253 254 } AESHeader; 255 212 256 static int encode_frame( 213 257 bool dts, 214 258 unsigned char *data, … … 239 283 #ifdef ENABLE_AC3_DECODER 240 284 enc_len = ac3_sync( 241 285 data + 8, &flags, &sample_rate, &bit_rate, (int*)&block_len); 286 block_len *= 2 * 2; 242 287 #else 243 288 enc_len = a52_syncinfo(data + 8, &flags, &sample_rate, &bit_rate); 244 289 block_len = MAX_AC3_FRAME_SIZE; … … 263 308 264 309 // the following values come from libmpcodecs/ad_hwac3.c in mplayer. 265 310 // they form a valid IEC958 AC3 header. 311 266 312 data[0] = 0x72; 267 313 data[1] = 0xF8; 268 314 data[2] = 0x1F; … … 272 318 { 273 319 switch(nr_samples) 274 320 { 321 case 256: 322 data[4] = 0x0A; /* DTS-? (256-sample bursts) */ 323 break; 324 275 325 case 512: 276 326 data[4] = 0x0B; /* DTS-1 (512-sample bursts) */ 277 327 break; … … 284 334 data[4] = 0x0D; /* DTS-3 (2048-sample bursts) */ 285 335 break; 286 336 337 case 4096: 338 data[4] = 0x0E; /* DTS-? (4096-sample bursts) */ 339 break; 340 287 341 default: 288 342 VERBOSE(VB_IMPORTANT, LOC + 289 343 QString("DTS: %1-sample bursts not supported") -
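The encoder wraps each compressed frame in an IEC958/IEC 61937 burst whose preamble starts with the sync bytes set above. A hedged sketch of that framing follows: only the sync bytes and the DTS burst-size codes come from the patch; the AC-3 data-type value (0x01) and the length-in-bits field are filled in from the usual IEC 61937 layout and should be treated as assumptions.

// Sketch of an IEC958/IEC 61937 burst preamble.
#include <cstdint>
#include <cstdio>

// pick the data-type code for a DTS burst from its sample count
static int dts_burst_code(int nr_samples)
{
    switch (nr_samples)
    {
        case 256:  return 0x0A;   // DTS, 256-sample bursts
        case 512:  return 0x0B;   // DTS-1
        case 1024: return 0x0C;   // DTS-2
        case 2048: return 0x0D;   // DTS-3
        case 4096: return 0x0E;   // DTS, 4096-sample bursts
        default:   return -1;     // unsupported burst size
    }
}

static void write_preamble(uint8_t *data, bool dts, int nr_samples,
                           int payload_bytes)
{
    data[0] = 0x72;               // Pa/Pb sync words, little-endian bytes
    data[1] = 0xF8;
    data[2] = 0x1F;
    data[3] = 0x4E;
    data[4] = dts ? (uint8_t)dts_burst_code(nr_samples)
                  : 0x01;         // 0x01 = AC-3 (IEC 61937 convention, assumed)
    data[5] = 0x00;
    // Pd: payload length in bits, little-endian (assumed layout)
    data[6] = (uint8_t)((payload_bytes * 8) & 0xFF);
    data[7] = (uint8_t)(((payload_bytes * 8) >> 8) & 0xFF);
}

int main()
{
    uint8_t hdr[8];
    write_preamble(hdr, false, 1536, 1792);   // e.g. a 448 kbps AC-3 frame
    for (int i = 0; i < 8; i++)
        std::printf("%02X ", hdr[i]);
    std::printf("\n");
    return 0;
}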
libs/libmythfreesurround/el_processor.cpp
1 /* 2 Copyright (C) 2007 Christian Kothe 3 4 This program is free software; you can redistribute it and/or 5 modify it under the terms of the GNU General Public License 6 as published by the Free Software Foundation; either version 2 7 of the License, or (at your option) any later version. 8 9 This program is distributed in the hope that it will be useful, 10 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 GNU General Public License for more details. 13 14 You should have received a copy of the GNU General Public License 15 along with this program; if not, write to the Free Software 16 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 17 */ 18 19 #include "el_processor.h" 20 #include <complex> 21 #include <cmath> 22 #include <vector> 23 #include "fftw3.h" 24 25 #define FILTERED_LFE 26 27 #pragma comment (lib,"libfftw3f-3.lib") 28 29 typedef std::complex<float> cfloat; 30 31 const float PI = 3.141592654; 32 const float epsilon = 0.000001; 33 //const float center_level = 0.5*sqrt(0.5); // gain of the center channel 34 const float center_level = sqrt(0.5); // gain of the center channel 35 //const float center_level = 0.5; // gain of the center channel 36 37 // should be .6-.7 38 // but with centerlevel 2x what its supposed to be, we halve 0.68 39 // to keep center from clipping 40 const float window_gain = 0.34; 41 42 // private implementation of the surround decoder 43 class decoder_impl { 44 public: 45 // create an instance of the decoder 46 // blocksize is fixed over the lifetime of this object for performance reasons 47 decoder_impl(unsigned blocksize=8192): N(blocksize), halfN(blocksize/2) { 48 // create FFTW buffers 49 lt = (float*)fftwf_malloc(sizeof(float)*N); 50 rt = (float*)fftwf_malloc(sizeof(float)*N); 51 dst = (float*)fftwf_malloc(sizeof(float)*N); 52 dftL = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N); 53 dftR = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N); 54 src = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N); 55 loadL = fftwf_plan_dft_r2c_1d(N, lt, dftL,FFTW_MEASURE); 56 loadR = fftwf_plan_dft_r2c_1d(N, rt, dftR,FFTW_MEASURE); 57 store = fftwf_plan_dft_c2r_1d(N, src, dst,FFTW_MEASURE); 58 // resize our own buffers 59 frontR.resize(N); 60 frontL.resize(N); 61 avg.resize(N); 62 surR.resize(N); 63 surL.resize(N); 64 #ifdef FILTERED_LFE 65 trueavg.resize(N); 66 #endif 67 xfs.resize(N); 68 yfs.resize(N); 69 inbuf[0].resize(N); 70 inbuf[1].resize(N); 71 for (unsigned c=0;c<6;c++) { 72 outbuf[c].resize(N); 73 filter[c].resize(N); 74 } 75 // DC component of filters is always 0 76 for (unsigned c=0;c<5;c++) 77 { 78 filter[c][0] = 0.0; 79 filter[c][1] = 0.0; 80 filter[c][halfN] = 0.0; 81 } 82 sample_rate(48000); 83 // generate the window function (square root of hann, b/c it is applied before and after the transform) 84 wnd.resize(N); 85 // dft normalization included in the window for zero cost scaling 86 // also add a gain factor of *2 due to processing gain in algo (see center_level) 87 for (unsigned k=0;k<N;k++) 88 //wnd[k] = sqrt(0.5*(1-cos(2*PI*k/N))/N); 89 wnd[k] = sqrt(window_gain*0.5*(1-cos(2*PI*k/N))/N); 90 current_buf = 0; 91 // set the default coefficients 92 surround_coefficients(0.8165,0.5774); 93 phase_mode(0); 94 separation(1,1); 95 steering_mode(1); 96 } 97 98 // destructor 99 ~decoder_impl() { 100 // clean up the FFTW stuff 101 fftwf_destroy_plan(store); 102 fftwf_destroy_plan(loadR); 103 fftwf_destroy_plan(loadL); 104 
fftwf_free(src); 105 fftwf_free(dftR); 106 fftwf_free(dftL); 107 fftwf_free(dst); 108 fftwf_free(rt); 109 fftwf_free(lt); 110 } 111 112 float ** getInputBuffers() 113 { 114 inbufs[0] = &inbuf[0][current_buf*halfN]; 115 inbufs[1] = &inbuf[1][current_buf*halfN]; 116 return inbufs; 117 } 118 119 float ** getOutputBuffers() 120 { 121 outbufs[0] = &outbuf[0][current_buf*halfN]; 122 outbufs[1] = &outbuf[1][current_buf*halfN]; 123 outbufs[2] = &outbuf[2][current_buf*halfN]; 124 outbufs[3] = &outbuf[3][current_buf*halfN]; 125 outbufs[4] = &outbuf[4][current_buf*halfN]; 126 outbufs[5] = &outbuf[5][current_buf*halfN]; 127 return outbufs; 128 } 129 130 // decode a chunk of stereo sound, has to contain exactly blocksize samples 131 // center_width [0..1] distributes the center information towards the front left/right channels, 1=full distribution, 0=no distribution 132 // dimension [0..1] moves the soundfield backwards, 0=front, 1=side 133 // adaption_rate [0..1] determines how fast the steering gets adapted, 1=instantaneous, 0.1 = very slow adaption 134 void decode(float center_width, float dimension, float adaption_rate) { 135 // process first part 136 int index; 137 index = current_buf*halfN; 138 float *in_second[2] = {&inbuf[0][index],&inbuf[1][index]}; 139 current_buf ^= 1; 140 index = current_buf*halfN; 141 float *in_first[2] = {&inbuf[0][index],&inbuf[1][index]}; 142 add_output(in_first,in_second,center_width,dimension,adaption_rate,true); 143 // shift last half of input buffer to the beginning 144 } 145 146 // flush the internal buffers 147 void flush() { 148 for (unsigned k=0;k<N;k++) { 149 for (unsigned c=0;c<6;c++) 150 outbuf[c][k] = 0; 151 inbuf[0][k] = 0; 152 inbuf[1][k] = 0; 153 } 154 } 155 156 // set lfe filter params 157 void sample_rate(unsigned int srate) { 158 // lfe filter is just straight through band limited 159 unsigned int cutoff = (250*N)/srate; 160 for (unsigned f=0;f<=halfN;f++) { 161 if ((f>=2) && (f<cutoff)) 162 filter[5][f] = 1.0; 163 else 164 filter[5][f] = 0.0; 165 } 166 } 167 168 // set the assumed surround mixing coefficients 169 void surround_coefficients(float a, float b) { 170 master_gain = 1.0; 171 // calc the simple coefficients 172 surround_high = a; 173 surround_low = b; 174 surround_balance = (a-b)/(a+b); 175 surround_level = 1/(a+b); 176 // calc the linear coefficients 177 cfloat i(0,1), u((a+b)*i), v((b-a)*i), n(0.25,0),o(1,0); 178 A = (v-o)*n; B = (o-u)*n; C = (-o-v)*n; D = (o+u)*n; 179 E = (o+v)*n; F = (o+u)*n; G = (o-v)*n; H = (o-u)*n; 180 } 181 182 // set the phase shifting mode 183 void phase_mode(unsigned mode) { 184 const float modes[4][2] = {{0,0},{0,PI},{PI,0},{-PI/2,PI/2}}; 185 phase_offsetL = modes[mode][0]; 186 phase_offsetR = modes[mode][1]; 187 } 188 189 // what steering mode should be chosen 190 void steering_mode(bool mode) { linear_steering = mode; } 191 192 // set front & rear separation controls 193 void separation(float front, float rear) { 194 front_separation = front; 195 rear_separation = rear; 196 } 197 198 private: 199 // polar <-> cartesian coodinates conversion 200 static inline float amplitude(const float cf[2]) { return sqrt(cf[0]*cf[0] + cf[1]*cf[1]); } 201 static inline float phase(const float cf[2]) { return atan2(cf[1],cf[0]); } 202 static inline cfloat polar(float a, float p) { return cfloat(a*cos(p),a*sin(p)); } 203 static inline float sqr(float x) { return x*x; } 204 // the dreaded min/max 205 static inline float min(float a, float b) { return a<b?a:b; } 206 static inline float max(float a, float b) { return a>b?a:b; } 207 
static inline float clamp(float x) { return max(-1,min(1,x)); } 208 209 // handle the output buffering for overlapped calls of block_decode 210 void add_output(float *input1[2], float *input2[2], float center_width, float dimension, float adaption_rate, bool result=false) { 211 // add the windowed data to the last 1/2 of the output buffer 212 float *out[6] = {&outbuf[0][0],&outbuf[1][0],&outbuf[2][0],&outbuf[3][0],&outbuf[4][0],&outbuf[5][0]}; 213 block_decode(input1,input2,out,center_width,dimension,adaption_rate); 214 } 215 216 // CORE FUNCTION: decode a block of data 217 void block_decode(float *input1[2], float *input2[2], float *output[6], float center_width, float dimension, float adaption_rate) { 218 // 1. scale the input by the window function; this serves a dual purpose: 219 // - first it improves the FFT resolution b/c boundary discontinuities (and their frequencies) get removed 220 // - second it allows for smooth blending of varying filters between the blocks 221 { 222 float* pWnd = &wnd[0]; 223 float* pLt = <[0]; 224 float* pRt = &rt[0]; 225 float* pIn0 = input1[0]; 226 float* pIn1 = input1[1]; 227 for (unsigned k=0;k<halfN;k++) { 228 *pLt++ = *pIn0++ * *pWnd; 229 *pRt++ = *pIn1++ * *pWnd++; 230 } 231 pIn0 = input2[0]; 232 pIn1 = input2[1]; 233 //for (unsigned k=0,k1=halfN;k<halfN;k++,k1++) { 234 for (unsigned k=0;k<halfN;k++) { 235 *pLt++ = *pIn0++ * *pWnd; 236 *pRt++ = *pIn1++ * *pWnd++; 237 } 238 } 239 240 // ... and tranform it into the frequency domain 241 fftwf_execute(loadL); 242 fftwf_execute(loadR); 243 244 // 2. compare amplitude and phase of each DFT bin and produce the X/Y coordinates in the sound field 245 // but dont do DC or N/2 component 246 for (unsigned f=2;f<halfN;f++) { 247 // get left/right amplitudes/phases 248 float ampL = amplitude(dftL[f]), ampR = amplitude(dftR[f]); 249 float phaseL = phase(dftL[f]), phaseR = phase(dftR[f]); 250 // if (ampL+ampR < epsilon) 251 // continue; 252 253 // calculate the amplitude/phase difference 254 float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL)); 255 float phaseDiff = phaseL - phaseR; 256 if (phaseDiff < -PI) phaseDiff += 2*PI; 257 if (phaseDiff > PI) phaseDiff -= 2*PI; 258 phaseDiff = abs(phaseDiff); 259 260 if (linear_steering) { 261 /* cfloat w = polar(sqrt(ampL*ampL+ampR*ampR), (phaseL+phaseR)/2); 262 cfloat lt = cfloat(dftL[f][0],dftL[f][1])/w, rt = cfloat(dftR[f][0],dftR[f][1])/w; */ 263 // xfs[f] = -(C*(rt-H) - B*E + F*A + G*(D-lt)) / (G*A - C*E).real(); 264 // yfs[f] = (rt - (xfs[f]*E+H))/(F+xfs[f]*G); 265 266 /* 267 Problem: 268 This assumes that the values are interpolated linearly between the cardinal points. 269 But this way we have no chance of knowing the average volume... 270 - Can we solve that computing everything under the assumption of normalized volume? 271 No. Seemingly not. 272 - Maybe we should add w explitcitly into the equation and see if we can solve it... 
273 */ 274 275 276 //cfloat lt(0.5,0),rt(0.5,0); 277 //cfloat x(0,0), y(1,0); 278 /*cfloat p = (C*(rt-H) - B*E + F*A + G*(D-lt)) / (G*A - C*E); 279 cfloat q = B*(rt+H) + F*(D-lt) / (G*A - C*E); 280 cfloat s = sqrt(p*p/4.0f - q); 281 cfloat x = -p; 282 cfloat x1 = -p/2.0f + s; 283 cfloat x2 = -p/2.0f - s; 284 float x = 0; 285 if (x1.real() >= -1 && x1.real() <= 1) 286 x = x1.real(); 287 else if (x2.real() >= -1 && x2.real() <= 1) 288 x = x2.real();*/ 289 290 //cfloat yp = (rt - (x*E+H))/(F+x*G); 291 //cfloat xp = (lt - (y*B+D))/(A+y*C); 292 293 /*xfs[f] = x; 294 yfs[f] = y.real();*/ 295 296 // --- this is the fancy new linear mode --- 297 298 // get sound field x/y position 299 yfs[f] = get_yfs(ampDiff,phaseDiff); 300 xfs[f] = get_xfs(ampDiff,yfs[f]); 301 302 // add dimension control 303 yfs[f] = clamp(yfs[f] - dimension); 304 305 // add crossfeed control 306 xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2)); 307 308 // 3. generate frequency filters for each output channel 309 float left = (1-xfs[f])/2, right = (1+xfs[f])/2; 310 float front = (1+yfs[f])/2, back = (1-yfs[f])/2; 311 float volume[5] = { 312 front * (left * center_width + max(0,-xfs[f]) * (1-center_width)), // left 313 front * center_level*((1-abs(xfs[f])) * (1-center_width)), // center 314 front * (right * center_width + max(0, xfs[f]) * (1-center_width)), // right 315 back * surround_level * left, // left surround 316 back * surround_level * right // right surround 317 }; 318 319 // adapt the prior filter 320 for (unsigned c=0;c<5;c++) 321 filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c]/*/N*/; 322 323 } else { 324 // --- this is the old & simple steering mode --- 325 326 // calculate the amplitude/phase difference 327 float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL)); 328 float phaseDiff = phaseL - phaseR; 329 if (phaseDiff < -PI) phaseDiff += 2*PI; 330 if (phaseDiff > PI) phaseDiff -= 2*PI; 331 phaseDiff = abs(phaseDiff); 332 333 // determine sound field x-position 334 xfs[f] = ampDiff; 335 336 // determine preliminary sound field y-position from phase difference 337 yfs[f] = 1 - (phaseDiff/PI)*2; 338 339 if (abs(xfs[f]) > surround_balance) { 340 // blend linearly between the surrounds and the fronts if the balance exceeds the surround encoding balance 341 // this is necessary because the sound field is trapezoidal and will be stretched behind the listener 342 float frontness = (abs(xfs[f]) - surround_balance)/(1-surround_balance); 343 yfs[f] = (1-frontness) * yfs[f] + frontness * 1; 344 } 345 346 // add dimension control 347 yfs[f] = clamp(yfs[f] - dimension); 348 349 // add crossfeed control 350 xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2)); 351 352 // 3. 
generate frequency filters for each output channel, according to the signal position 353 // the sum of all channel volumes must be 1.0 354 float left = (1-xfs[f])/2, right = (1+xfs[f])/2; 355 float front = (1+yfs[f])/2, back = (1-yfs[f])/2; 356 float volume[5] = { 357 front * (left * center_width + max(0,-xfs[f]) * (1-center_width)), // left 358 front * center_level*((1-abs(xfs[f])) * (1-center_width)), // center 359 front * (right * center_width + max(0, xfs[f]) * (1-center_width)), // right 360 back * surround_level*max(0,min(1,((1-(xfs[f]/surround_balance))/2))), // left surround 361 back * surround_level*max(0,min(1,((1+(xfs[f]/surround_balance))/2))) // right surround 362 }; 363 364 // adapt the prior filter 365 for (unsigned c=0;c<5;c++) 366 filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c]/*/N*/; 367 } 368 369 // ... and build the signal which we want to position 370 frontL[f] = polar(ampL+ampR,phaseL); 371 frontR[f] = polar(ampL+ampR,phaseR); 372 avg[f] = frontL[f] + frontR[f]; 373 surL[f] = polar(ampL+ampR,phaseL+phase_offsetL); 374 surR[f] = polar(ampL+ampR,phaseR+phase_offsetR); 375 #ifdef FILTERED_LFE 376 trueavg[f] = cfloat(dftL[f][0] + dftR[f][0], dftL[f][1] + dftR[f][1]); 377 #endif 378 } 379 380 // 4. distribute the unfiltered reference signals over the channels 381 apply_filter(&frontL[0],&filter[0][0],&output[0][0]); // front left 382 apply_filter(&avg[0], &filter[1][0],&output[1][0]); // front center 383 apply_filter(&frontR[0],&filter[2][0],&output[2][0]); // front right 384 apply_filter(&surL[0],&filter[3][0],&output[3][0]); // surround left 385 apply_filter(&surR[0],&filter[4][0],&output[4][0]); // surround right 386 #ifdef FILTERED_LFE 387 apply_filter(&trueavg[0],&filter[5][0],&output[5][0]); // lfe 388 #else 389 float* out5 = &output[5][(current_buf)*halfN]; 390 float* in2l = &input2[0][0]; 391 float* in2r = &input2[1][0]; 392 //for (unsigned k=0,k2=N/4;k<halfN;k++,k2++) { 393 for (unsigned k=0;k<halfN;k++) { 394 *out5++ = *in2l++ + *in2r++; 395 } 396 #endif 397 } 398 399 #define FASTER_CALC 400 // map from amplitude difference and phase difference to yfs 401 inline double get_yfs(double ampDiff, double phaseDiff) { 402 double x = 1-(((1-sqr(ampDiff))*phaseDiff)/PI*2); 403 #ifdef FASTER_CALC 404 double tanX = tan(x); 405 return 0.16468622925824683 + 0.5009268347818189*x - 0.06462757726992101*x*x 406 + 0.09170680403453149*x*x*x + 0.2617754892323973*tanX - 0.04180413533856156*sqr(tanX); 407 #else 408 return 0.16468622925824683 + 0.5009268347818189*x - 0.06462757726992101*x*x 409 + 0.09170680403453149*x*x*x + 0.2617754892323973*tan(x) - 0.04180413533856156*sqr(tan(x)); 410 #endif 411 } 412 413 // map from amplitude difference and yfs to xfs 414 inline double get_xfs(double ampDiff, double yfs) { 415 double x=ampDiff,y=yfs; 416 #ifdef FASTER_CALC 417 double tanX = tan(x); 418 double tanY = tan(y); 419 double asinX = asin(x); 420 double sinX = sin(x); 421 double sinY = sin(y); 422 double x3 = x*x*x; 423 double y2 = y*y; 424 double y3 = y*y2; 425 return 2.464833559224702*x - 423.52131153259404*x*y + 426 67.8557858606918*x3*y + 788.2429425544392*x*y2 - 427 79.97650354902909*x3*y2 - 513.8966153850349*x*y3 + 428 35.68117670186306*x3*y3 + 13867.406173420834*y*asinX - 429 2075.8237075786396*y2*asinX - 908.2722068360281*y3*asinX - 430 12934.654772878019*asinX*sinY - 13216.736529661162*y*tanX + 431 1288.6463247741938*y2*tanX + 1384.372969378453*y3*tanX + 432 12699.231471126128*sinY*tanX + 95.37131275594336*sinX*tanY - 433 91.21223198407546*tanX*tanY; 434 
#else 435 return 2.464833559224702*x - 423.52131153259404*x*y + 436 67.8557858606918*x*x*x*y + 788.2429425544392*x*y*y - 437 79.97650354902909*x*x*x*y*y - 513.8966153850349*x*y*y*y + 438 35.68117670186306*x*x*x*y*y*y + 13867.406173420834*y*asin(x) - 439 2075.8237075786396*y*y*asin(x) - 908.2722068360281*y*y*y*asin(x) - 440 12934.654772878019*asin(x)*sin(y) - 13216.736529661162*y*tan(x) + 441 1288.6463247741938*y*y*tan(x) + 1384.372969378453*y*y*y*tan(x) + 442 12699.231471126128*sin(y)*tan(x) + 95.37131275594336*sin(x)*tan(y) - 443 91.21223198407546*tan(x)*tan(y); 444 #endif 445 } 446 447 // filter the complex source signal and add it to target 448 void apply_filter(cfloat *signal, float *flt, float *target) { 449 // filter the signal 450 for (unsigned f=0;f<=halfN;f++) { 451 src[f][0] = signal[f].real() * flt[f]; 452 src[f][1] = signal[f].imag() * flt[f]; 453 } 454 // transform into time domain 455 fftwf_execute(store); 456 457 float* pT1 = &target[current_buf*halfN]; 458 float* pWnd1 = &wnd[0]; 459 float* pDst1 = &dst[0]; 460 float* pT2 = &target[(current_buf^1)*halfN]; 461 float* pWnd2 = &wnd[halfN]; 462 float* pDst2 = &dst[halfN]; 463 // add the result to target, windowed 464 for (unsigned int k=0;k<halfN;k++) 465 { 466 // 1st part is overlap add 467 *pT1++ += *pWnd1++ * *pDst1++; 468 // 2nd part is set as has no history 469 *pT2++ = *pWnd2++ * *pDst2++; 470 } 471 } 472 473 unsigned int N; // the block size 474 unsigned int halfN; // half block size precalculated 475 // FFTW data structures 476 float *lt,*rt,*dst; // left total, right total (source arrays), destination array 477 fftwf_complex *dftL,*dftR,*src; // intermediate arrays (FFTs of lt & rt, processing source) 478 fftwf_plan loadL,loadR,store; // plans for loading the data into the intermediate format and back 479 // buffers 480 std::vector<cfloat> frontL,frontR,avg,surL,surR; // the signal (phase-corrected) in the frequency domain 481 #ifdef FILTERED_LFE 482 std::vector<cfloat> trueavg; // for lfe generation 483 #endif 484 std::vector<float> xfs,yfs; // the feature space positions for each frequency bin 485 std::vector<float> wnd; // the window function, precalculated 486 std::vector<float> filter[6]; // a frequency filter for each output channel 487 std::vector<float> inbuf[2]; // the sliding input buffers 488 std::vector<float> outbuf[6]; // the sliding output buffers 489 // coefficients 490 float surround_high,surround_low; // high and low surround mixing coefficient (e.g. 
0.8165/0.5774) 491 float surround_balance; // the xfs balance that follows from the coeffs 492 float surround_level; // gain for the surround channels (follows from the coeffs 493 float master_gain; // gain for all channels 494 float phase_offsetL, phase_offsetR;// phase shifts to be applied to the rear channels 495 float front_separation; // front stereo separation 496 float rear_separation; // rear stereo separation 497 bool linear_steering; // whether the steering should be linear or not 498 cfloat A,B,C,D,E,F,G,H; // coefficients for the linear steering 499 int current_buf; // specifies which buffer is 2nd half of input sliding buffer 500 float * inbufs[2]; // for passing back to driver 501 float * outbufs[6]; // for passing back to driver 502 503 friend class fsurround_decoder; 504 }; 505 506 507 // implementation of the shell class 508 509 fsurround_decoder::fsurround_decoder(unsigned blocksize): impl(new decoder_impl(blocksize)) { } 510 511 fsurround_decoder::~fsurround_decoder() { delete impl; } 512 513 void fsurround_decoder::decode(float center_width, float dimension, float adaption_rate) { 514 impl->decode(center_width,dimension,adaption_rate); 515 } 516 517 void fsurround_decoder::flush() { impl->flush(); } 518 519 void fsurround_decoder::surround_coefficients(float a, float b) { impl->surround_coefficients(a,b); } 520 521 void fsurround_decoder::phase_mode(unsigned mode) { impl->phase_mode(mode); } 522 523 void fsurround_decoder::steering_mode(bool mode) { impl->steering_mode(mode); } 524 525 void fsurround_decoder::separation(float front, float rear) { impl->separation(front,rear); } 526 527 float ** fsurround_decoder::getInputBuffers() 528 { 529 return impl->getInputBuffers(); 530 } 531 532 float ** fsurround_decoder::getOutputBuffers() 533 { 534 return impl->getOutputBuffers(); 535 } 536 537 void fsurround_decoder::sample_rate(unsigned int samplerate) 538 { 539 impl->sample_rate(samplerate); 540 } 1 /* 2 Copyright (C) 2007 Christian Kothe 3 4 This program is free software; you can redistribute it and/or 5 modify it under the terms of the GNU General Public License 6 as published by the Free Software Foundation; either version 2 7 of the License, or (at your option) any later version. 8 9 This program is distributed in the hope that it will be useful, 10 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 GNU General Public License for more details. 13 14 You should have received a copy of the GNU General Public License 15 along with this program; if not, write to the Free Software 16 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
17 */ 18 19 //#define USE_FFTW3 20 21 #include "el_processor.h" 22 #include <complex> 23 #include <cmath> 24 #include <vector> 25 #ifdef USE_FFTW3 26 #include "fftw3.h" 27 #else 28 extern "C" { 29 #include "dsputil.h" 30 }; 31 typedef FFTSample FFTComplexArray[2]; 32 #endif 33 34 35 #ifdef USE_FFTW3 36 #pragma comment (lib,"libfftw3f-3.lib") 37 #endif 38 39 typedef std::complex<float> cfloat; 40 41 const float PI = 3.141592654; 42 const float epsilon = 0.000001; 43 //const float center_level = 0.5*sqrt(0.5); // gain of the center channel 44 //const float center_level = sqrt(0.5); // gain of the center channel 45 const float center_level = 1.0; // gain of the center channel 46 //const float center_level = 0.5; // gain of the center channel 47 48 // should be .6-.7 49 // but with centerlevel 2x what its supposed to be, we halve 0.68 50 // to keep center from clipping 51 //const float window_gain = 0.34; 52 //const float window_gain = 0.68; 53 const float window_gain = 0.95; // to prive a bit of margin 54 55 // private implementation of the surround decoder 56 class decoder_impl { 57 public: 58 // create an instance of the decoder 59 // blocksize is fixed over the lifetime of this object for performance reasons 60 decoder_impl(unsigned blocksize=8192): N(blocksize), halfN(blocksize/2) { 61 #ifdef USE_FFTW3 62 // create FFTW buffers 63 lt = (float*)fftwf_malloc(sizeof(float)*N); 64 rt = (float*)fftwf_malloc(sizeof(float)*N); 65 dst = (float*)fftwf_malloc(sizeof(float)*N); 66 dftL = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N); 67 dftR = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N); 68 src = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N); 69 loadL = fftwf_plan_dft_r2c_1d(N, lt, dftL,FFTW_MEASURE); 70 loadR = fftwf_plan_dft_r2c_1d(N, rt, dftR,FFTW_MEASURE); 71 store = fftwf_plan_dft_c2r_1d(N, src, dst,FFTW_MEASURE); 72 #else 73 // create lavc fft buffers 74 lt = (float*)av_malloc(sizeof(FFTSample)*N); 75 rt = (float*)av_malloc(sizeof(FFTSample)*N); 76 dftL = (FFTComplexArray*)av_malloc(sizeof(FFTComplex)*N*2); 77 dftR = (FFTComplexArray*)av_malloc(sizeof(FFTComplex)*N*2); 78 src = (FFTComplexArray*)av_malloc(sizeof(FFTComplex)*N*2); 79 fftContextForward = (FFTContext*)av_malloc(sizeof(FFTContext)); 80 memset(fftContextForward, 0, sizeof(FFTContext)); 81 fftContextReverse = (FFTContext*)av_malloc(sizeof(FFTContext)); 82 memset(fftContextReverse, 0, sizeof(FFTContext)); 83 ff_fft_init(fftContextForward, 13, 0); 84 ff_fft_init(fftContextReverse, 13, 1); 85 #endif 86 // resize our own buffers 87 frontR.resize(N); 88 frontL.resize(N); 89 avg.resize(N); 90 surR.resize(N); 91 surL.resize(N); 92 trueavg.resize(N); 93 xfs.resize(N); 94 yfs.resize(N); 95 inbuf[0].resize(N); 96 inbuf[1].resize(N); 97 for (unsigned c=0;c<6;c++) { 98 outbuf[c].resize(N); 99 filter[c].resize(N); 100 } 101 // DC component of filters is always 0 102 for (unsigned c=0;c<5;c++) 103 { 104 filter[c][0] = 0.0; 105 filter[c][1] = 0.0; 106 filter[c][halfN] = 0.0; 107 } 108 sample_rate(48000); 109 // generate the window function (square root of hann, b/c it is applied before and after the transform) 110 wnd.resize(N); 111 // dft normalization included in the window for zero cost scaling 112 // also add a gain factor of *2 due to processing gain in algo (see center_level) 113 surround_gain(1.0); 114 current_buf = 0; 115 // set the default coefficients 116 surround_coefficients(0.8165,0.5774); 117 phase_mode(0); 118 separation(1,1); 119 steering_mode(1); 120 } 121 122 // destructor 123 ~decoder_impl() { 124 
#ifdef USE_FFTW3 125 // clean up the FFTW stuff 126 fftwf_destroy_plan(store); 127 fftwf_destroy_plan(loadR); 128 fftwf_destroy_plan(loadL); 129 fftwf_free(src); 130 fftwf_free(dftR); 131 fftwf_free(dftL); 132 fftwf_free(dst); 133 fftwf_free(rt); 134 fftwf_free(lt); 135 #else 136 ff_fft_end(fftContextForward); 137 ff_fft_end(fftContextReverse); 138 av_free(src); 139 av_free(dftR); 140 av_free(dftL); 141 av_free(rt); 142 av_free(lt); 143 av_free(fftContextForward); 144 av_free(fftContextReverse); 145 #endif 146 } 147 148 float ** getInputBuffers() 149 { 150 inbufs[0] = &inbuf[0][current_buf*halfN]; 151 inbufs[1] = &inbuf[1][current_buf*halfN]; 152 return inbufs; 153 } 154 155 float ** getOutputBuffers() 156 { 157 outbufs[0] = &outbuf[0][current_buf*halfN]; 158 outbufs[1] = &outbuf[1][current_buf*halfN]; 159 outbufs[2] = &outbuf[2][current_buf*halfN]; 160 outbufs[3] = &outbuf[3][current_buf*halfN]; 161 outbufs[4] = &outbuf[4][current_buf*halfN]; 162 outbufs[5] = &outbuf[5][current_buf*halfN]; 163 return outbufs; 164 } 165 166 // decode a chunk of stereo sound, has to contain exactly blocksize samples 167 // center_width [0..1] distributes the center information towards the front left/right channels, 1=full distribution, 0=no distribution 168 // dimension [0..1] moves the soundfield backwards, 0=front, 1=side 169 // adaption_rate [0..1] determines how fast the steering gets adapted, 1=instantaneous, 0.1 = very slow adaption 170 void decode(float center_width, float dimension, float adaption_rate) { 171 // process first part 172 int index; 173 index = current_buf*halfN; 174 float *in_second[2] = {&inbuf[0][index],&inbuf[1][index]}; 175 current_buf ^= 1; 176 index = current_buf*halfN; 177 float *in_first[2] = {&inbuf[0][index],&inbuf[1][index]}; 178 add_output(in_first,in_second,center_width,dimension,adaption_rate,true); 179 // shift last half of input buffer to the beginning 180 } 181 182 // flush the internal buffers 183 void flush() { 184 for (unsigned k=0;k<N;k++) { 185 for (unsigned c=0;c<6;c++) 186 outbuf[c][k] = 0; 187 inbuf[0][k] = 0; 188 inbuf[1][k] = 0; 189 } 190 } 191 192 // set lfe filter params 193 void sample_rate(unsigned int srate) { 194 // lfe filter is just straight through band limited 195 unsigned int cutoff = (250*N)/srate; 196 for (unsigned f=0;f<=halfN;f++) { 197 if ((f>=2) && (f<cutoff)) 198 filter[5][f] = 1.0; 199 else 200 filter[5][f] = 0.0; 201 } 202 } 203 204 // set the assumed surround mixing coefficients 205 void surround_coefficients(float a, float b) { 206 // calc the simple coefficients 207 surround_high = a; 208 surround_low = b; 209 surround_balance = (a-b)/(a+b); 210 surround_level = 1/(a+b); 211 // calc the linear coefficients 212 cfloat i(0,1), u((a+b)*i), v((b-a)*i), n(0.25,0),o(1,0); 213 A = (v-o)*n; B = (o-u)*n; C = (-o-v)*n; D = (o+u)*n; 214 E = (o+v)*n; F = (o+u)*n; G = (o-v)*n; H = (o-u)*n; 215 } 216 217 void surround_gain(float gain) { 218 master_gain = gain * window_gain * 0.5 * 0.25; 219 for (unsigned k=0;k<N;k++) 220 wnd[k] = sqrt(master_gain*(1-cos(2*PI*k/N))/N); 221 } 222 223 // set the phase shifting mode 224 void phase_mode(unsigned mode) { 225 const float modes[4][2] = {{0,0},{0,PI},{PI,0},{-PI/2,PI/2}}; 226 phase_offsetL = modes[mode][0]; 227 phase_offsetR = modes[mode][1]; 228 } 229 230 // what steering mode should be chosen 231 void steering_mode(bool mode) { linear_steering = mode; } 232 233 // set front & rear separation controls 234 void separation(float front, float rear) { 235 front_separation = front; 236 rear_separation = rear; 237 
} 237 238 239 private: 240 // polar <-> cartesian coordinates conversion 241 static inline float amplitude(const float cf[2]) { return sqrt(cf[0]*cf[0] + cf[1]*cf[1]); } 242 static inline float phase(const float cf[2]) { return atan2(cf[1],cf[0]); } 243 static inline cfloat polar(float a, float p) { return cfloat(a*cos(p),a*sin(p)); } 244 static inline float sqr(float x) { return x*x; } 245 // the dreaded min/max 246 static inline float min(float a, float b) { return a<b?a:b; } 247 static inline float max(float a, float b) { return a>b?a:b; } 248 static inline float clamp(float x) { return max(-1,min(1,x)); } 249 250 // handle the output buffering for overlapped calls of block_decode 251 void add_output(float *input1[2], float *input2[2], float center_width, float dimension, float adaption_rate, bool result=false) { 252 // add the windowed data to the last 1/2 of the output buffer 253 float *out[6] = {&outbuf[0][0],&outbuf[1][0],&outbuf[2][0],&outbuf[3][0],&outbuf[4][0],&outbuf[5][0]}; 254 block_decode(input1,input2,out,center_width,dimension,adaption_rate); 255 } 256 257 // CORE FUNCTION: decode a block of data 258 void block_decode(float *input1[2], float *input2[2], float *output[6], float center_width, float dimension, float adaption_rate) { 259 // 1. scale the input by the window function; this serves a dual purpose: 260 // - first it improves the FFT resolution b/c boundary discontinuities (and their frequencies) get removed 261 // - second it allows for smooth blending of varying filters between the blocks 262 { 263 float* pWnd = &wnd[0]; 264 float* pLt = &lt[0]; 265 float* pRt = &rt[0]; 266 float* pIn0 = input1[0]; 267 float* pIn1 = input1[1]; 268 for (unsigned k=0;k<halfN;k++) { 269 *pLt++ = *pIn0++ * *pWnd; 270 *pRt++ = *pIn1++ * *pWnd++; 271 } 272 pIn0 = input2[0]; 273 pIn1 = input2[1]; 274 for (unsigned k=0;k<halfN;k++) { 275 *pLt++ = *pIn0++ * *pWnd; 276 *pRt++ = *pIn1++ * *pWnd++; 277 } 278 } 279 280 #ifdef USE_FFTW3 281 // ... and transform it into the frequency domain 282 fftwf_execute(loadL); 283 fftwf_execute(loadR); 284 #else 285 ff_fft_permuteRC(fftContextForward, lt, (FFTComplex*)&dftL[0]); 286 ff_fft_permuteRC(fftContextForward, rt, (FFTComplex*)&dftR[0]); 287 ff_fft_calc(fftContextForward, (FFTComplex*)&dftL[0]); 288 ff_fft_calc(fftContextForward, (FFTComplex*)&dftR[0]); 289 #endif 290 291 // 2. compare amplitude and phase of each DFT bin and produce the X/Y coordinates in the sound field 292 // but don't do DC or N/2 component 293 for (unsigned f=2;f<halfN;f++) { 294 // get left/right amplitudes/phases 295 float ampL = amplitude(dftL[f]), ampR = amplitude(dftR[f]); 296 float phaseL = phase(dftL[f]), phaseR = phase(dftR[f]); 297 // if (ampL+ampR < epsilon) 298 // continue; 299 300 // calculate the amplitude/phase difference 301 float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL)); 302 float phaseDiff = phaseL - phaseR; 303 if (phaseDiff < -PI) phaseDiff += 2*PI; 304 if (phaseDiff > PI) phaseDiff -= 2*PI; 305 phaseDiff = abs(phaseDiff); 306 307 if (linear_steering) { 308 /* cfloat w = polar(sqrt(ampL*ampL+ampR*ampR), (phaseL+phaseR)/2); 309 cfloat lt = cfloat(dftL[f][0],dftL[f][1])/w, rt = cfloat(dftR[f][0],dftR[f][1])/w; */ 310 // xfs[f] = -(C*(rt-H) - B*E + F*A + G*(D-lt)) / (G*A - C*E).real(); 311 // yfs[f] = (rt - (xfs[f]*E+H))/(F+xfs[f]*G); 312 313 /* 314 Problem: 315 This assumes that the values are interpolated linearly between the cardinal points. 316 But this way we have no chance of knowing the average volume...
317 - Can we solve that computing everything under the assumption of normalized volume? 318 No. Seemingly not. 319 - Maybe we should add w explitcitly into the equation and see if we can solve it... 320 */ 321 322 323 //cfloat lt(0.5,0),rt(0.5,0); 324 //cfloat x(0,0), y(1,0); 325 /*cfloat p = (C*(rt-H) - B*E + F*A + G*(D-lt)) / (G*A - C*E); 326 cfloat q = B*(rt+H) + F*(D-lt) / (G*A - C*E); 327 cfloat s = sqrt(p*p/4.0f - q); 328 cfloat x = -p; 329 cfloat x1 = -p/2.0f + s; 330 cfloat x2 = -p/2.0f - s; 331 float x = 0; 332 if (x1.real() >= -1 && x1.real() <= 1) 333 x = x1.real(); 334 else if (x2.real() >= -1 && x2.real() <= 1) 335 x = x2.real();*/ 336 337 //cfloat yp = (rt - (x*E+H))/(F+x*G); 338 //cfloat xp = (lt - (y*B+D))/(A+y*C); 339 340 /*xfs[f] = x; 341 yfs[f] = y.real();*/ 342 343 // --- this is the fancy new linear mode --- 344 345 // get sound field x/y position 346 yfs[f] = get_yfs(ampDiff,phaseDiff); 347 xfs[f] = get_xfs(ampDiff,yfs[f]); 348 349 // add dimension control 350 yfs[f] = clamp(yfs[f] - dimension); 351 352 // add crossfeed control 353 xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2)); 354 355 // 3. generate frequency filters for each output channel 356 float left = (1-xfs[f])/2, right = (1+xfs[f])/2; 357 float front = (1+yfs[f])/2, back = (1-yfs[f])/2; 358 float volume[5] = { 359 front * (left * center_width + max(0,-xfs[f]) * (1-center_width)), // left 360 front * center_level*((1-abs(xfs[f])) * (1-center_width)), // center 361 front * (right * center_width + max(0, xfs[f]) * (1-center_width)), // right 362 back * surround_level * left, // left surround 363 back * surround_level * right // right surround 364 }; 365 366 // adapt the prior filter 367 for (unsigned c=0;c<5;c++) 368 filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c]; 369 370 } else { 371 // --- this is the old & simple steering mode --- 372 373 // calculate the amplitude/phase difference 374 float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL)); 375 float phaseDiff = phaseL - phaseR; 376 if (phaseDiff < -PI) phaseDiff += 2*PI; 377 if (phaseDiff > PI) phaseDiff -= 2*PI; 378 phaseDiff = abs(phaseDiff); 379 380 // determine sound field x-position 381 xfs[f] = ampDiff; 382 383 // determine preliminary sound field y-position from phase difference 384 yfs[f] = 1 - (phaseDiff/PI)*2; 385 386 if (abs(xfs[f]) > surround_balance) { 387 // blend linearly between the surrounds and the fronts if the balance exceeds the surround encoding balance 388 // this is necessary because the sound field is trapezoidal and will be stretched behind the listener 389 float frontness = (abs(xfs[f]) - surround_balance)/(1-surround_balance); 390 yfs[f] = (1-frontness) * yfs[f] + frontness * 1; 391 } 392 393 // add dimension control 394 yfs[f] = clamp(yfs[f] - dimension); 395 396 // add crossfeed control 397 xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2)); 398 399 // 3. 
generate frequency filters for each output channel, according to the signal position 400 // the sum of all channel volumes must be 1.0 401 float left = (1-xfs[f])/2, right = (1+xfs[f])/2; 402 float front = (1+yfs[f])/2, back = (1-yfs[f])/2; 403 float volume[5] = { 404 front * (left * center_width + max(0,-xfs[f]) * (1-center_width)), // left 405 front * center_level*((1-abs(xfs[f])) * (1-center_width)), // center 406 front * (right * center_width + max(0, xfs[f]) * (1-center_width)), // right 407 back * surround_level*max(0,min(1,((1-(xfs[f]/surround_balance))/2))), // left surround 408 back * surround_level*max(0,min(1,((1+(xfs[f]/surround_balance))/2))) // right surround 409 }; 410 411 // adapt the prior filter 412 for (unsigned c=0;c<5;c++) 413 filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c]; 414 } 415 416 // ... and build the signal which we want to position 417 frontL[f] = polar(ampL+ampR,phaseL); 418 frontR[f] = polar(ampL+ampR,phaseR); 419 avg[f] = frontL[f] + frontR[f]; 420 surL[f] = polar(ampL+ampR,phaseL+phase_offsetL); 421 surR[f] = polar(ampL+ampR,phaseR+phase_offsetR); 422 trueavg[f] = cfloat(dftL[f][0] + dftR[f][0], dftL[f][1] + dftR[f][1]); 423 } 424 425 // 4. distribute the unfiltered reference signals over the channels 426 apply_filter(&frontL[0],&filter[0][0],&output[0][0]); // front left 427 apply_filter(&avg[0], &filter[1][0],&output[1][0]); // front center 428 apply_filter(&frontR[0],&filter[2][0],&output[2][0]); // front right 429 apply_filter(&surL[0],&filter[3][0],&output[3][0]); // surround left 430 apply_filter(&surR[0],&filter[4][0],&output[4][0]); // surround right 431 apply_filter(&trueavg[0],&filter[5][0],&output[5][0]); // lfe 432 } 433 434 #define FASTER_CALC 435 // map from amplitude difference and phase difference to yfs 436 inline double get_yfs(double ampDiff, double phaseDiff) { 437 double x = 1-(((1-sqr(ampDiff))*phaseDiff)/PI*2); 438 #ifdef FASTER_CALC 439 double tanX = tan(x); 440 return 0.16468622925824683 + 0.5009268347818189*x - 0.06462757726992101*x*x 441 + 0.09170680403453149*x*x*x + 0.2617754892323973*tanX - 0.04180413533856156*sqr(tanX); 442 #else 443 return 0.16468622925824683 + 0.5009268347818189*x - 0.06462757726992101*x*x 444 + 0.09170680403453149*x*x*x + 0.2617754892323973*tan(x) - 0.04180413533856156*sqr(tan(x)); 445 #endif 446 } 447 448 // map from amplitude difference and yfs to xfs 449 inline double get_xfs(double ampDiff, double yfs) { 450 double x=ampDiff,y=yfs; 451 #ifdef FASTER_CALC 452 double tanX = tan(x); 453 double tanY = tan(y); 454 double asinX = asin(x); 455 double sinX = sin(x); 456 double sinY = sin(y); 457 double x3 = x*x*x; 458 double y2 = y*y; 459 double y3 = y*y2; 460 return 2.464833559224702*x - 423.52131153259404*x*y + 461 67.8557858606918*x3*y + 788.2429425544392*x*y2 - 462 79.97650354902909*x3*y2 - 513.8966153850349*x*y3 + 463 35.68117670186306*x3*y3 + 13867.406173420834*y*asinX - 464 2075.8237075786396*y2*asinX - 908.2722068360281*y3*asinX - 465 12934.654772878019*asinX*sinY - 13216.736529661162*y*tanX + 466 1288.6463247741938*y2*tanX + 1384.372969378453*y3*tanX + 467 12699.231471126128*sinY*tanX + 95.37131275594336*sinX*tanY - 468 91.21223198407546*tanX*tanY; 469 #else 470 return 2.464833559224702*x - 423.52131153259404*x*y + 471 67.8557858606918*x*x*x*y + 788.2429425544392*x*y*y - 472 79.97650354902909*x*x*x*y*y - 513.8966153850349*x*y*y*y + 473 35.68117670186306*x*x*x*y*y*y + 13867.406173420834*y*asin(x) - 474 2075.8237075786396*y*y*asin(x) - 908.2722068360281*y*y*y*asin(x) - 475 
12934.654772878019*asin(x)*sin(y) - 13216.736529661162*y*tan(x) + 476 1288.6463247741938*y*y*tan(x) + 1384.372969378453*y*y*y*tan(x) + 477 12699.231471126128*sin(y)*tan(x) + 95.37131275594336*sin(x)*tan(y) - 478 91.21223198407546*tan(x)*tan(y); 479 #endif 480 } 481 482 // filter the complex source signal and add it to target 483 void apply_filter(cfloat *signal, float *flt, float *target) { 484 // filter the signal 485 unsigned f; 486 for (f=0;f<=halfN;f++) { 487 src[f][0] = signal[f].real() * flt[f]; 488 src[f][1] = signal[f].imag() * flt[f]; 489 } 490 #ifdef USE_FFTW3 491 // transform into time domain 492 fftwf_execute(store); 493 494 float* pT1 = &target[current_buf*halfN]; 495 float* pWnd1 = &wnd[0]; 496 float* pDst1 = &dst[0]; 497 float* pT2 = &target[(current_buf^1)*halfN]; 498 float* pWnd2 = &wnd[halfN]; 499 float* pDst2 = &dst[halfN]; 500 // add the result to target, windowed 501 for (unsigned int k=0;k<halfN;k++) 502 { 503 // 1st part is overlap add 504 *pT1++ += *pWnd1++ * *pDst1++; 505 // 2nd part is set as has no history 506 *pT2++ = *pWnd2++ * *pDst2++; 507 } 508 #else 509 // enforce odd symmetry 510 for (f=1;f<halfN;f++) { 511 src[N-f][0] = src[f][0]; 512 src[N-f][1] = -src[f][1]; // complex conjugate 513 } 514 ff_fft_permute(fftContextReverse, (FFTComplex*)&src[0]); 515 ff_fft_calc(fftContextReverse, (FFTComplex*)&src[0]); 516 517 float* pT1 = &target[current_buf*halfN]; 518 float* pWnd1 = &wnd[0]; 519 float* pDst1 = &src[0][0]; 520 float* pT2 = &target[(current_buf^1)*halfN]; 521 float* pWnd2 = &wnd[halfN]; 522 float* pDst2 = &src[halfN][0]; 523 // add the result to target, windowed 524 for (unsigned int k=0;k<halfN;k++) 525 { 526 // 1st part is overlap add 527 *pT1++ += *pWnd1++ * *pDst1; pDst1 += 2; 528 // 2nd part is set as has no history 529 *pT2++ = *pWnd2++ * *pDst2; pDst2 += 2; 530 } 531 #endif 532 } 533 534 #ifndef USE_FFTW3 535 /** 536 * * Do the permutation needed BEFORE calling ff_fft_calc() 537 * special for freesurround that also copies 538 * */ 539 void ff_fft_permuteRC(FFTContext *s, FFTSample *r, FFTComplex *z) 540 { 541 int j, k, np; 542 const uint16_t *revtab = s->revtab; 543 544 /* reverse */ 545 np = 1 << s->nbits; 546 for(j=0;j<np;j++) { 547 k = revtab[j]; 548 z[k].re = r[j]; 549 z[k].im = 0.0; 550 } 551 } 552 553 /** 554 * * Do the permutation needed BEFORE calling ff_fft_calc() 555 * special for freesurround that also copies and 556 * discards im component as it should be 0 557 * */ 558 void ff_fft_permuteCR(FFTContext *s, FFTComplex *z, FFTSample *r) 559 { 560 int j, k, np; 561 FFTComplex tmp; 562 const uint16_t *revtab = s->revtab; 563 564 /* reverse */ 565 np = 1 << s->nbits; 566 for(j=0;j<np;j++) { 567 k = revtab[j]; 568 if (k < j) { 569 r[k] = z[j].re; 570 r[j] = z[k].re; 571 } 572 } 573 } 574 #endif 575 576 unsigned int N; // the block size 577 unsigned int halfN; // half block size precalculated 578 #ifdef USE_FFTW3 579 // FFTW data structures 580 float *lt,*rt,*dst; // left total, right total (source arrays), destination array 581 fftwf_complex *dftL,*dftR,*src; // intermediate arrays (FFTs of lt & rt, processing source) 582 fftwf_plan loadL,loadR,store; // plans for loading the data into the intermediate format and back 583 #else 584 FFTContext *fftContextForward, *fftContextReverse; 585 FFTSample *lt,*rt; // left total, right total (source arrays), destination array 586 FFTComplexArray *dftL,*dftR,*src; // intermediate arrays (FFTs of lt & rt, processing source) 587 #endif 588 // buffers 589 std::vector<cfloat> frontL,frontR,avg,surL,surR; 
// the signal (phase-corrected) in the frequency domain 590 std::vector<cfloat> trueavg; // for lfe generation 591 std::vector<float> xfs,yfs; // the feature space positions for each frequency bin 592 std::vector<float> wnd; // the window function, precalculated 593 std::vector<float> filter[6]; // a frequency filter for each output channel 594 std::vector<float> inbuf[2]; // the sliding input buffers 595 std::vector<float> outbuf[6]; // the sliding output buffers 596 // coefficients 597 float surround_high,surround_low; // high and low surround mixing coefficient (e.g. 0.8165/0.5774) 598 float surround_balance; // the xfs balance that follows from the coeffs 599 float surround_level; // gain for the surround channels (follows from the coeffs) 600 float master_gain; // gain for all channels 601 float phase_offsetL, phase_offsetR;// phase shifts to be applied to the rear channels 602 float front_separation; // front stereo separation 603 float rear_separation; // rear stereo separation 604 bool linear_steering; // whether the steering should be linear or not 605 cfloat A,B,C,D,E,F,G,H; // coefficients for the linear steering 606 int current_buf; // specifies which buffer is 2nd half of input sliding buffer 607 float * inbufs[2]; // for passing back to driver 608 float * outbufs[6]; // for passing back to driver 609 610 friend class fsurround_decoder; 611 }; 612 613 614 // implementation of the shell class 615 616 fsurround_decoder::fsurround_decoder(unsigned blocksize): impl(new decoder_impl(blocksize)) { } 617 618 fsurround_decoder::~fsurround_decoder() { delete impl; } 619 620 void fsurround_decoder::decode(float center_width, float dimension, float adaption_rate) { 621 impl->decode(center_width,dimension,adaption_rate); 622 } 623 624 void fsurround_decoder::flush() { impl->flush(); } 625 626 void fsurround_decoder::surround_coefficients(float a, float b) { impl->surround_coefficients(a,b); } 627 628 void fsurround_decoder::gain(float gain) { impl->surround_gain(gain); } 629 630 void fsurround_decoder::phase_mode(unsigned mode) { impl->phase_mode(mode); } 631 632 void fsurround_decoder::steering_mode(bool mode) { impl->steering_mode(mode); } 633 634 void fsurround_decoder::separation(float front, float rear) { impl->separation(front,rear); } 635 636 float ** fsurround_decoder::getInputBuffers() 637 { 638 return impl->getInputBuffers(); 639 } 640 641 float ** fsurround_decoder::getOutputBuffers() 642 { 643 return impl->getOutputBuffers(); 644 } 645 646 void fsurround_decoder::sample_rate(unsigned int samplerate) 647 { 648 impl->sample_rate(samplerate); 649 } -
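The window built in the constructor above is the square root of a Hann window precisely because it is applied once before the forward transform and once after the inverse transform: the squared window overlap-adds to a constant at a hop of N/2, which is what makes the sliding-block reconstruction seamless. A standalone sketch of that identity follows (illustration only, not part of the patch); note that in the patch wnd[] additionally folds master_gain and the DFT normalization into the window, so the overlapped sum there is a constant proportional to master_gain/N rather than exactly 1.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main()
    {
        const unsigned N = 8192;                  // block size, as used by decoder_impl
        const double PI = 3.141592653589793;
        std::vector<double> wnd(N);
        for (unsigned k = 0; k < N; k++)          // sqrt(Hann) == sin(PI*k/N)
            wnd[k] = std::sqrt(0.5 * (1.0 - std::cos(2.0 * PI * k / N)));

        // the window is applied twice (analysis + synthesis), so the effective
        // weight is wnd^2; at a hop of N/2 the two overlapping halves must sum to 1
        double worst = 0.0;
        for (unsigned k = 0; k < N / 2; k++)
        {
            double sum = wnd[k] * wnd[k] + wnd[k + N / 2] * wnd[k + N / 2];
            worst = std::max(worst, std::fabs(sum - 1.0));
        }
        std::printf("max deviation from unity: %g\n", worst);  // ~0 up to rounding
        return 0;
    }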
libs/libmythfreesurround/freesurround.cpp
192 192 { 193 193 params.phasemode = 1; 194 194 params.center_width = 0; 195 params.gain = 1.0; 195 196 } 196 197 else 197 198 { 198 params.center_width = 50; 199 params.center_width = 70; 200 // for 50, gain should be about 1.9, c/lr about 2.7 201 // for 70, gain should be about 3.1, c/lr about 1.5 202 params.gain = 3.1; 199 203 } 200 204 switch (surround_mode) 201 205 { … … 231 235 decoder->phase_mode(params.phasemode); 232 236 decoder->surround_coefficients(params.coeff_a, params.coeff_b); 233 237 decoder->separation(params.front_sep/100.0,params.rear_sep/100.0); 238 decoder->gain(params.gain); 234 239 } 235 240 } 236 241 … … 244 249 phasemode(0), 245 250 steering(1), 246 251 front_sep(100), 247 rear_sep(100) 252 rear_sep(100), 253 gain(1.0) 248 254 { 249 255 } 250 256 … … 267 273 VERBOSE(QString("FreeSurround::~FreeSurround done")); 268 274 } 269 275 270 void get_peak_i(short* data, int count, int* maxv, int* minv)271 {272 int _maxv = *data++;273 int _minv = _maxv;274 for(int i=1;i<count;i++)275 {276 int v = *data++;277 if (v > _maxv) _maxv = v;278 if (v < _minv) _minv = v;279 }280 *maxv = _maxv;281 *minv = _minv;282 }283 284 void get_peak_i2(short* data, int count, int* maxv, int* minv)285 {286 int _maxv = *data;287 data += 2;288 int _minv = _maxv;289 for(int i=1;i<count;i++)290 {291 int v = *data;292 if (v > _maxv) _maxv = v;293 if (v < _minv) _minv = v;294 data += 2;295 }296 *maxv = _maxv;297 *minv = _minv;298 }299 300 void get_peak(float* data, int count, int* maxv, int* minv)301 {302 int _maxv = lrintf(*data++);303 int _minv = _maxv;304 for(int i=1;i<count;i++)305 {306 int v = lrintf(*data++);307 if (v > _maxv) _maxv = v;308 if (v < _minv) _minv = v;309 }310 *maxv = _maxv;311 *minv = _minv;312 }313 314 276 uint FreeSurround::putSamples(short* samples, uint numSamples, uint numChannels, int step) 315 277 { 316 278 int i; -
libs/libmythfreesurround/el_processor.h
1 /* 2 Copyright (C) 2007 Christian Kothe 3 4 This program is free software; you can redistribute it and/or 5 modify it under the terms of the GNU General Public License 6 as published by the Free Software Foundation; either version 2 7 of the License, or (at your option) any later version. 8 9 This program is distributed in the hope that it will be useful, 10 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 GNU General Public License for more details. 13 14 You should have received a copy of the GNU General Public License 15 along with this program; if not, write to the Free Software 16 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 17 */ 18 19 #ifndef EL_PROCESSOR_H 20 #define EL_PROCESSOR_H 21 22 // the Free Surround decoder 23 class fsurround_decoder { 24 public: 25 // create an instance of the decoder 26 // blocksize is fixed over the lifetime of this object for performance reasons 27 fsurround_decoder(unsigned blocksize=8192); 28 // destructor 29 ~fsurround_decoder(); 30 31 float ** getInputBuffers(); 32 float ** getOutputBuffers(); 33 34 // decode a chunk of stereo sound, has to contain exactly blocksize samples 35 // center_width [0..1] distributes the center information towards the front left/right channels, 1=full distribution, 0=no distribution 36 // dimension [0..1] moves the soundfield backwards, 0=front, 1=side 37 // adaption_rate [0..1] determines how fast the steering gets adapted, 1=instantaneous, 0.1 = very slow adaption 38 //void decode(float *input[2], float *output[6], float center_width=1, float dimension=0, float adaption_rate=1); 39 void decode(float center_width=1, float dimension=0, float adaption_rate=1); 40 41 // flush the internal buffers 42 void flush(); 43 44 // --- advanced configuration --- 45 46 // override the surround coefficients 47 // a is the coefficient of left rear in left total, b is the coefficient of left rear in right total; the same is true for right. 48 void surround_coefficients(float a, float b); 49 50 // set the phase shifting mode for decoding 51 // 0 = (+0°,+0°) - music mode 52 // 1 = (+0°,+180°) - PowerDVD compatibility 53 // 2 = (+180°,+0°) - BeSweet compatibility 54 // 3 = (-90°,+90°) - This seems to work. I just don't know why. 55 void phase_mode(unsigned mode); 56 57 // override the steering mode 58 // false = simple non-linear steering (old) 59 // true = advanced linear steering (new) 60 void steering_mode(bool mode); 61 62 // set front/rear stereo separation 63 // 1.0 is default, 0.0 is mono 64 void separation(float front,float rear); 65 66 // set samplerate for lfe filter 67 void sample_rate(unsigned int samplerate); 68 69 private: 70 class decoder_impl *impl; // private implementation (details hidden) 71 }; 72 73 74 #endif 1 /* 2 Copyright (C) 2007 Christian Kothe 3 4 This program is free software; you can redistribute it and/or 5 modify it under the terms of the GNU General Public License 6 as published by the Free Software Foundation; either version 2 7 of the License, or (at your option) any later version. 8 9 This program is distributed in the hope that it will be useful, 10 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 GNU General Public License for more details. 
13 14 You should have received a copy of the GNU General Public License 15 along with this program; if not, write to the Free Software 16 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 17 */ 18 19 #ifndef EL_PROCESSOR_H 20 #define EL_PROCESSOR_H 21 22 // the Free Surround decoder 23 class fsurround_decoder { 24 public: 25 // create an instance of the decoder 26 // blocksize is fixed over the lifetime of this object for performance reasons 27 fsurround_decoder(unsigned blocksize=8192); 28 // destructor 29 ~fsurround_decoder(); 30 31 float ** getInputBuffers(); 32 float ** getOutputBuffers(); 33 34 // decode a chunk of stereo sound, has to contain exactly blocksize samples 35 // center_width [0..1] distributes the center information towards the front left/right channels, 1=full distribution, 0=no distribution 36 // dimension [0..1] moves the soundfield backwards, 0=front, 1=side 37 // adaption_rate [0..1] determines how fast the steering gets adapted, 1=instantaneous, 0.1 = very slow adaption 38 //void decode(float *input[2], float *output[6], float center_width=1, float dimension=0, float adaption_rate=1); 39 void decode(float center_width=1, float dimension=0, float adaption_rate=1); 40 41 // flush the internal buffers 42 void flush(); 43 44 // --- advanced configuration --- 45 46 // override the surround coefficients 47 // a is the coefficient of left rear in left total, b is the coefficient of left rear in right total; the same is true for right. 48 void surround_coefficients(float a, float b); 49 50 // override for master surround gain 51 void gain(float gain); 52 53 // set the phase shifting mode for decoding 54 // 0 = (+0°,+0°) - music mode 55 // 1 = (+0°,+180°) - PowerDVD compatibility 56 // 2 = (+180°,+0°) - BeSweet compatibility 57 // 3 = (-90°,+90°) - This seems to work. I just don't know why. 58 void phase_mode(unsigned mode); 59 60 // override the steering mode 61 // false = simple non-linear steering (old) 62 // true = advanced linear steering (new) 63 void steering_mode(bool mode); 64 65 // set front/rear stereo separation 66 // 1.0 is default, 0.0 is mono 67 void separation(float front,float rear); 68 69 // set samplerate for lfe filter 70 void sample_rate(unsigned int samplerate); 71 72 private: 73 class decoder_impl *impl; // private implementation (details hidden) 74 }; 75 76 77 #endif -
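For orientation, here is a hypothetical caller of this interface (not taken from freesurround.cpp): judging from the implementation in el_processor.cpp, each decode() call consumes blocksize/2 fresh stereo samples written through getInputBuffers() and leaves blocksize/2 samples per channel readable through getOutputBuffers(), in the order front left, center, front right, surround left, surround right, LFE. The helper name and the parameter values passed to decode() below are assumptions made for the sketch.

    #include "el_processor.h"

    // hypothetical helper, not part of the patch; halfN must equal blocksize/2
    void upmix_half_block(fsurround_decoder &dec,
                          const float *left, const float *right, // halfN new samples each
                          float **out6,                          // [6][halfN] destination
                          unsigned halfN)
    {
        float **in = dec.getInputBuffers();        // two planar input pointers
        for (unsigned i = 0; i < halfN; i++)
        {
            in[0][i] = left[i];
            in[1][i] = right[i];
        }

        dec.decode(0.7f /*center_width*/, 0.0f /*dimension*/, 1.0f /*adaption_rate*/);

        float **out = dec.getOutputBuffers();      // order: L, C, R, LS, RS, LFE
        for (unsigned c = 0; c < 6; c++)
            for (unsigned i = 0; i < halfN; i++)
                out6[c][i] = out[c][i];
    }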
libs/libmythfreesurround/freesurround.h
66 66 int32_t phasemode; // phase shifting mode 67 67 int32_t steering; // steering mode (0=simple, 1=linear) 68 68 int32_t front_sep, rear_sep;// front/rear stereo separation 69 float gain; // total gain 69 70 70 71 // (default) constructor 71 72 fsurround_params(int32_t center_width=100, int32_t dimension=0); -
libs/libmythfreesurround/libmythfreesurround.pro
19 19 SOURCES += el_processor.cpp 20 20 SOURCES += freesurround.cpp 21 21 22 #required until its rewritten to use avcodec fft lib 23 #LIBS += -lfftw3 24 LIBS += -lfftw3f 25 22 contains( CONFIG_LIBFFTW3, yes ) { 23 #required until its rewritten to use avcodec fft lib 24 LIBS += -lfftw3f 25 DEFINES += USE_FFTW3 26 # while Im testing 27 DEPENDPATH += ../libavcodec 28 LIBS += -L../libavcodec -lavcodec 29 INCLUDEPATH += ../../libs/libavutil 30 DEFINES -= USE_FFTW3 31 } else { 32 DEPENDPATH += ../libavcodec 33 LIBS += -L../libavcodec -lavcodec 34 INCLUDEPATH += ../../libs/libavutil 35 } -
19 19 SOURCES += el_processor.cpp 20 20 SOURCES += freesurround.cpp 21 21 22 #required until it's rewritten to use avcodec fft lib 23 #LIBS += -lfftw3 24 LIBS += -lfftw3f 25 22 contains( CONFIG_LIBFFTW3, yes ) { 23 #required until it's rewritten to use avcodec fft lib 24 LIBS += -lfftw3f 25 DEFINES += USE_FFTW3 26 # while I'm testing 27 DEPENDPATH += ../libavcodec 28 LIBS += -L../libavcodec -lavcodec 29 INCLUDEPATH += ../../libs/libavutil 30 DEFINES -= USE_FFTW3 31 } else { 32 DEPENDPATH += ../libavcodec 33 LIBS += -L../libavcodec -lavcodec 34 INCLUDEPATH += ../../libs/libavutil 35 }
programs/mythfrontend/globalsettings.cpp
56 56 } 57 57 #endif 58 58 #ifdef USING_ALSA 59 gc->addSelection("ALSA:default", "ALSA:default"); 59 gc->addSelection("ALSA:default", "ALSA:default"); 60 gc->addSelection("ALSA:surround51", "ALSA:surround51"); 61 gc->addSelection("ALSA:analog", "ALSA:analog"); 62 gc->addSelection("ALSA:digital", "ALSA:digital"); 63 gc->addSelection("ALSA:mixed-analog", "ALSA:mixed-analog"); 64 gc->addSelection("ALSA:mixed-digital", "ALSA:mixed-digital"); 60 65 #endif 61 66 #ifdef USING_ARTS 62 67 gc->addSelection("ARTS:", "ARTS:"); … … 78 83 return gc; 79 84 } 80 85 86 static HostComboBox *MaxAudioChannels() 87 { 88 HostComboBox *gc = new HostComboBox("MaxChannels",false); 89 gc->setLabel(QObject::tr("Max Audio Channels")); 90 gc->addSelection(QObject::tr("Stereo"), "2", true); // default 91 gc->addSelection(QObject::tr("5.1"), "6"); 92 gc->setHelpText( 93 QObject::tr( 94 "Set the maximum number of audio channels to be decoded. " 95 "This is for multi-channel/surround audio playback.")); 96 return gc; 97 } 98 99 static HostComboBox *AudioUpmixType() 100 { 101 HostComboBox *gc = new HostComboBox("AudioUpmixType",false); 102 gc->setLabel(QObject::tr("Upmix")); 103 gc->addSelection(QObject::tr("Passive"), "0"); 104 gc->addSelection(QObject::tr("Active Simple"), "1"); 105 gc->addSelection(QObject::tr("Active Linear"), "2", true); // default 106 gc->setHelpText( 107 QObject::tr( 108 "Set the audio upmix type for 2ch to 6ch conversion. " 109 "This is for multi-channel/surround audio playback.")); 110 return gc; 111 } 112 81 113 static HostComboBox *PassThroughOutputDevice() 82 114 { 83 115 HostComboBox *gc = new HostComboBox("PassThruOutputDevice", true); … … 3255 3297 vgrp0->addChild(AC3PassThrough()); 3256 3298 vgrp0->addChild(DTSPassThrough()); 3257 3299 3300 HorizontalConfigurationGroup *agrp = 3301 new HorizontalConfigurationGroup(false, false, true, true); 3302 agrp->addChild(MaxAudioChannels()); 3303 agrp->addChild(AudioUpmixType()); 3304 addChild(agrp); 3305 3258 3306 VerticalConfigurationGroup *vgrp1 = 3259 3307 new VerticalConfigurationGroup(false, false, true, true); 3260 3308 vgrp1->addChild(AggressiveBuffer()); -
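The two new combo boxes store plain integers under the keys "MaxChannels" and "AudioUpmixType". The "MaxChannels" read-back in the sketch below is exactly what avformatdecoder.cpp does further down in this patch; the "AudioUpmixType" read and the mythcontext.h include are assumptions about how the upmix code path would fetch it.

    #include "mythcontext.h"

    // sketch only: how the new settings come back out of the settings table
    static void read_audio_settings(uint &max_channels, int &upmix_type)
    {
        max_channels = (uint) gContext->GetNumSetting("MaxChannels", 2);   // 2 (Stereo) or 6 (5.1)
        upmix_type   = gContext->GetNumSetting("AudioUpmixType", 2);       // 0=Passive, 1=Active Simple, 2=Active Linear
    }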
programs/mythtranscode/transcode.cpp
55 55 56 56 // reconfigure sound out for new params 57 57 virtual void Reconfigure(int audio_bits, int audio_channels, 58 int audio_samplerate, bool audio_passthru) 58 int audio_samplerate, bool audio_passthru, 59 void *audio_codec = NULL) 59 60 { 61 ClearError(); 60 62 (void)audio_samplerate; 61 63 (void)audio_passthru; 64 (void)audio_codec; 62 65 bits = audio_bits; 63 66 channels = audio_channels; 64 67 bytes_per_sample = bits * channels / 8; 68 if ((uint)audio_channels > 2) 69 Error(QString("Invalid channel count %1").arg(channels)); 65 70 } 66 71 67 72 // dsprate is in 100 * samples/second -
libs/libmythtv/avformatdecoder.h
261 261 bool allow_ac3_passthru; 262 262 bool allow_dts_passthru; 263 263 bool disable_passthru; 264 uint max_channels; 265 264 266 VideoFrame *dummy_frame; 265 267 266 268 AudioInfo audioIn; -
libs/libmythtv/avformatdecoder.cpp
51 51 52 52 #define MAX_AC3_FRAME_SIZE 6144 53 53 54 /** Set to zero to allow any number of AC3 channels. */55 #define MAX_OUTPUT_CHANNELS 256 57 54 static int cc608_parity(uint8_t byte); 58 55 static int cc608_good_parity(const int *parity_table, uint16_t data); 59 56 static void cc608_build_parity_table(int *parity_table); … … 400 397 // Audio 401 398 audioSamples(new short int[AVCODEC_MAX_AUDIO_FRAME_SIZE]), 402 399 allow_ac3_passthru(false), allow_dts_passthru(false), 403 disable_passthru(false), dummy_frame(NULL), 400 disable_passthru(false), max_channels(2), 401 dummy_frame(NULL), 404 402 // DVD 405 403 lastdvdtitle(-1), lastcellstart(0), 406 404 dvdmenupktseen(false), indvdstill(false), … … 417 415 418 416 allow_ac3_passthru = gContext->GetNumSetting("AC3PassThru", false); 419 417 allow_dts_passthru = gContext->GetNumSetting("DTSPassThru", false); 418 max_channels = (uint) gContext->GetNumSetting("MaxChannels", 2); 420 419 421 420 audioIn.sample_size = -32; // force SetupAudioStream to run once 422 421 itv = GetNVP()->GetInteractiveTV(); … … 1596 1595 <<") already open, leaving it alone."); 1597 1596 } 1598 1597 //assert(enc->codec_id); 1598 VERBOSE(VB_GENERAL, LOC + QString("codec %1 has %2 channels") 1599 .arg(codec_id_string(enc->codec_id)) 1600 .arg(enc->channels)); 1599 1601 1602 #if 0 1603 // HACK MULTICHANNEL DTS passthru disabled for multichannel, 1604 // dont know how to handle this 1600 1605 // HACK BEGIN REALLY UGLY HACK FOR DTS PASSTHRU 1601 1606 if (enc->codec_id == CODEC_ID_DTS) 1602 1607 { … … 1605 1610 // enc->bit_rate = what??; 1606 1611 } 1607 1612 // HACK END REALLY UGLY HACK FOR DTS PASSTHRU 1613 #endif 1608 1614 1609 1615 bitrate += enc->bit_rate; 1610 1616 break; … … 3303 3309 reselectAudioTrack = true; 3304 3310 } 3305 3311 3312 bool do_ac3_passthru = (allow_ac3_passthru && !transcoding && 3313 (curstream->codec->codec_id == CODEC_ID_AC3)); 3314 bool do_dts_passthru = (allow_dts_passthru && !transcoding && 3315 (curstream->codec->codec_id == CODEC_ID_DTS)); 3316 bool using_passthru = do_ac3_passthru || do_dts_passthru; 3317 3306 3318 // detect channels on streams that need 3307 3319 // to be decoded before we can know this 3320 bool already_decoded = false; 3308 3321 if (!curstream->codec->channels) 3309 3322 { 3310 3323 QMutexLocker locker(&avcodeclock); 3311 curstream->codec->channels = MAX_OUTPUT_CHANNELS; 3324 VERBOSE(VB_IMPORTANT, LOC + 3325 QString("Setting channels to %1") 3326 .arg(audioOut.channels)); 3327 3328 if (using_passthru) 3329 { 3330 // for passthru let it select the max number of channels 3331 curstream->codec->channels = 0; 3332 curstream->codec->request_channels = 0; 3333 } 3334 else 3335 { 3336 curstream->codec->channels = audioOut.channels; 3337 curstream->codec->request_channels = audioOut.channels; 3338 } 3312 3339 ret = avcodec_decode_audio( 3313 3340 curstream->codec, audioSamples, 3314 3341 &data_size, ptr, len); 3342 already_decoded = true; 3315 3343 3316 3344 reselectAudioTrack |= curstream->codec->channels; 3317 3345 } … … 3369 3397 AVCodecContext *ctx = curstream->codec; 3370 3398 3371 3399 if ((ctx->channels == 0) || 3372 (ctx->channels > MAX_OUTPUT_CHANNELS))3373 ctx->channels = MAX_OUTPUT_CHANNELS;3400 (ctx->channels > audioOut.channels)) 3401 ctx->channels = audioOut.channels; 3374 3402 3375 ret = avcodec_decode_audio( 3376 ctx, audioSamples, &data_size, ptr, len); 3403 if (!already_decoded) 3404 { 3405 curstream->codec->request_channels = audioOut.channels; 3406 ret = avcodec_decode_audio( 3407 ctx, audioSamples, 
&data_size, ptr, len); 3408 } 3377 3409 3378 3410 // When decoding some audio streams the number of 3379 3411 // channels, etc isn't known until we try decoding it. … … 3808 3840 3809 3841 void AvFormatDecoder::SetDisablePassThrough(bool disable) 3810 3842 { 3843 // can only disable never reenable as once 3844 // timestretch is on its on for the session 3845 if (disable_passthru) 3846 return; 3847 3811 3848 if (selectedTrack[kTrackTypeAudio].av_stream_index < 0) 3812 3849 { 3813 3850 disable_passthru = disable; … … 3840 3877 AVCodecContext *codec_ctx = NULL; 3841 3878 AudioInfo old_in = audioIn; 3842 3879 AudioInfo old_out = audioOut; 3880 bool using_passthru = false; 3843 3881 3844 3882 if ((currentTrack[kTrackTypeAudio] >= 0) && 3845 3883 (selectedTrack[kTrackTypeAudio].av_stream_index <= … … 3851 3889 assert(curstream->codec); 3852 3890 codec_ctx = curstream->codec; 3853 3891 bool do_ac3_passthru = (allow_ac3_passthru && !transcoding && 3854 !disable_passthru &&3855 3892 (codec_ctx->codec_id == CODEC_ID_AC3)); 3856 3893 bool do_dts_passthru = (allow_dts_passthru && !transcoding && 3857 !disable_passthru &&3858 3894 (codec_ctx->codec_id == CODEC_ID_DTS)); 3895 using_passthru = do_ac3_passthru || do_dts_passthru; 3859 3896 info = AudioInfo(codec_ctx->codec_id, 3860 3897 codec_ctx->sample_rate, codec_ctx->channels, 3861 do_ac3_passthru || do_dts_passthru);3898 using_passthru && !disable_passthru); 3862 3899 } 3863 3900 3864 3901 if (info == audioIn) 3865 3902 return false; // no change 3866 3903 3904 QString ptmsg = (using_passthru) ? " using passthru" : ""; 3867 3905 VERBOSE(VB_AUDIO, LOC + "Initializing audio parms from " + 3868 3906 QString("audio track #%1").arg(currentTrack[kTrackTypeAudio]+1)); 3869 3907 3870 3908 audioOut = audioIn = info; 3871 if ( audioIn.do_passthru)3909 if (using_passthru) 3872 3910 { 3873 3911 // A passthru stream looks like a 48KHz 2ch (@ 16bit) to the sound card 3874 audioOut.channels = 2; 3875 audioOut.sample_rate = 48000; 3876 audioOut.sample_size = 4; 3912 AudioInfo digInfo = audioOut; 3913 if (!disable_passthru) 3914 { 3915 digInfo.channels = 2; 3916 digInfo.sample_rate = 48000; 3917 digInfo.sample_size = 4; 3918 } 3919 if (audioOut.channels > (int) max_channels) 3920 { 3921 audioOut.channels = (int) max_channels; 3922 audioOut.sample_size = audioOut.channels * 2; 3923 codec_ctx->channels = audioOut.channels; 3924 } 3925 VERBOSE(VB_AUDIO, LOC + "Audio format changed digital passthrough " + 3926 QString("%1\n\t\t\tfrom %2 ; %3\n\t\t\tto %4 ; %5") 3927 .arg(digInfo.toString()) 3928 .arg(old_in.toString()).arg(old_out.toString()) 3929 .arg(audioIn.toString()).arg(audioOut.toString())); 3930 3931 if (digInfo.sample_rate > 0) 3932 GetNVP()->SetEffDsp(digInfo.sample_rate * 100); 3933 3934 GetNVP()->SetAudioParams(digInfo.bps(), digInfo.channels, 3935 digInfo.sample_rate, audioIn.do_passthru); 3936 // allow the audio stuff to reencode 3937 GetNVP()->SetAudioCodec(codec_ctx); 3938 GetNVP()->ReinitAudio(); 3939 return true; 3877 3940 } 3878 3941 else 3879 3942 { 3880 if (audioOut.channels > MAX_OUTPUT_CHANNELS)3943 if (audioOut.channels > (int) max_channels) 3881 3944 { 3882 audioOut.channels = MAX_OUTPUT_CHANNELS;3945 audioOut.channels = (int) max_channels; 3883 3946 audioOut.sample_size = audioOut.channels * 2; 3884 codec_ctx->channels = MAX_OUTPUT_CHANNELS;3947 codec_ctx->channels = audioOut.channels; 3885 3948 } 3886 3949 } 3887 3950 … … 3896 3959 GetNVP()->SetAudioParams(audioOut.bps(), audioOut.channels, 3897 3960 audioOut.sample_rate, 3898 3961 
audioIn.do_passthru); 3899 GetNVP()->ReinitAudio();3900 3962 3963 // allow the audio stuff to reencode 3964 GetNVP()->SetAudioCodec(using_passthru?codec_ctx:NULL); 3965 QString errMsg = GetNVP()->ReinitAudio(); 3966 bool audiook = errMsg.isEmpty(); 3967 3901 3968 return true; 3902 3969 } 3903 3970 -
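The heart of the avformatdecoder.cpp changes is the request_channels negotiation with lavc: tell the decoder the most channels the output chain will accept, decode, then inspect ctx->channels to see what was actually produced. Condensed into an isolated sketch below; the avcodec calls mirror those used in the patch, but the wrapper function itself is hypothetical.

    #include <stdint.h>
    extern "C" {
    #include "avcodec.h"
    }

    // sketch (not a literal excerpt): decode one audio packet, asking lavc to
    // produce at most max_channels channels
    static int decode_limited(AVCodecContext *ctx, int16_t *samples,
                              uint8_t *pkt, int pkt_len, int max_channels)
    {
        ctx->request_channels = max_channels;   // e.g. 2 or 6 from the MaxChannels setting
        int data_size = 0;
        int used = avcodec_decode_audio(ctx, samples, &data_size, pkt, pkt_len);
        // after the call ctx->channels holds what was actually decoded, which is
        // what SetupAudioStream() compares against audioOut.channels
        return (used < 0) ? used : data_size;
    }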
libs/libmythtv/NuppelVideoPlayer.h
127 127 void SetAudioInfo(const QString &main, const QString &passthru, uint rate); 128 128 void SetAudioParams(int bits, int channels, int samplerate, bool passthru); 129 129 void SetEffDsp(int dsprate); 130 void SetAudioCodec(void *ac); 130 131 131 132 // Sets 132 133 void SetParentWidget(QWidget *widget) { parentWidget = widget; } … … 684 685 int audio_bits; 685 686 int audio_samplerate; 686 687 float audio_stretchfactor; 688 void *audio_codec; 687 689 bool audio_passthru; 688 690 689 691 // Picture-in-Picture -
libs/libmythtv/NuppelVideoPlayer.cpp
207 207 audio_passthru_device(QString::null), 208 208 audio_channels(2), audio_bits(-1), 209 209 audio_samplerate(44100), audio_stretchfactor(1.0f), 210 audio_codec(NULL), 210 211 // Picture-in-Picture 211 212 pipplayer(NULL), setpipplayer(NULL), needsetpipplayer(false), 212 213 // Preview window support … … 799 800 if (audioOutput) 800 801 { 801 802 audioOutput->Reconfigure(audio_bits, audio_channels, 802 audio_samplerate, audio_passthru); 803 audio_samplerate, audio_passthru, 804 audio_codec); 803 805 errMsg = audioOutput->GetError(); 804 806 if (!errMsg.isEmpty()) 805 807 audioOutput->SetStretchFactor(audio_stretchfactor); … … 3684 3691 audio_passthru = passthru; 3685 3692 } 3686 3693 3694 void NuppelVideoPlayer::SetAudioCodec(void *ac) 3695 { 3696 audio_codec = ac; 3697 } 3698 3687 3699 void NuppelVideoPlayer::SetEffDsp(int dsprate) 3688 3700 { 3689 3701 if (audioOutput) -
libs/libavcodec/liba52.c
134 134 } 135 135 } 136 136 137 static inline int16_t convert(int32_t i) 138 { 139 return av_clip_int16(i - 0x43c00000); 140 } 141 142 void float2s16_2 (float * _f, int16_t * s16) 143 { 144 int i; 145 int32_t * f = (int32_t *) _f; 146 147 for (i = 0; i < 256; i++) { 148 s16[2*i] = convert (f[i]); 149 s16[2*i+1] = convert (f[i+256]); 150 } 151 } 152 153 void float2s16_4 (float * _f, int16_t * s16) 154 { 155 int i; 156 int32_t * f = (int32_t *) _f; 157 158 for (i = 0; i < 256; i++) { 159 s16[4*i] = convert (f[i]); 160 s16[4*i+1] = convert (f[i+256]); 161 s16[4*i+2] = convert (f[i+512]); 162 s16[4*i+3] = convert (f[i+768]); 163 } 164 } 165 166 void float2s16_5 (float * _f, int16_t * s16) 167 { 168 int i; 169 int32_t * f = (int32_t *) _f; 170 171 for (i = 0; i < 256; i++) { 172 s16[5*i] = convert (f[i]); 173 s16[5*i+1] = convert (f[i+256]); 174 s16[5*i+2] = convert (f[i+512]); 175 s16[5*i+3] = convert (f[i+768]); 176 s16[5*i+4] = convert (f[i+1024]); 177 } 178 } 179 180 #define LIKEAC3DEC 1 181 int channels_multi (int flags) 182 { 183 if (flags & A52_LFE) 184 return 6; 185 else if (flags & 1) /* center channel */ 186 return 5; 187 else if ((flags & A52_CHANNEL_MASK) == A52_2F2R) 188 return 4; 189 else 190 return 2; 191 } 192 193 void float2s16_multi (float * _f, int16_t * s16, int flags) 194 { 195 int i; 196 int32_t * f = (int32_t *) _f; 197 198 switch (flags) { 199 case A52_MONO: 200 for (i = 0; i < 256; i++) { 201 s16[5*i] = s16[5*i+1] = s16[5*i+2] = s16[5*i+3] = 0; 202 s16[5*i+4] = convert (f[i]); 203 } 204 break; 205 case A52_CHANNEL: 206 case A52_STEREO: 207 case A52_DOLBY: 208 float2s16_2 (_f, s16); 209 break; 210 case A52_3F: 211 for (i = 0; i < 256; i++) { 212 s16[5*i] = convert (f[i]); 213 s16[5*i+1] = convert (f[i+512]); 214 s16[5*i+2] = s16[5*i+3] = 0; 215 s16[5*i+4] = convert (f[i+256]); 216 } 217 break; 218 case A52_2F2R: 219 float2s16_4 (_f, s16); 220 break; 221 case A52_3F2R: 222 float2s16_5 (_f, s16); 223 break; 224 case A52_MONO | A52_LFE: 225 for (i = 0; i < 256; i++) { 226 #if LIKEAC3DEC 227 s16[6*i] = s16[6*i+2] = s16[6*i+3] = s16[6*i+4] = 0; 228 s16[6*i+1] = convert (f[i+256]); 229 s16[6*i+5] = convert (f[i]); 230 #else 231 s16[6*i] = s16[6*i+1] = s16[6*i+2] = s16[6*i+3] = 0; 232 s16[6*i+4] = convert (f[i+256]); 233 s16[6*i+5] = convert (f[i]); 234 #endif 235 } 236 break; 237 case A52_CHANNEL | A52_LFE: 238 case A52_STEREO | A52_LFE: 239 case A52_DOLBY | A52_LFE: 240 for (i = 0; i < 256; i++) { 241 #if LIKEAC3DEC 242 s16[6*i] = convert (f[i+256]); 243 s16[6*i+2] = convert (f[i+512]); 244 s16[6*i+1] = s16[6*i+3] = s16[6*i+4] = 0; 245 s16[6*i+5] = convert (f[i]); 246 #else 247 s16[6*i] = convert (f[i+256]); 248 s16[6*i+1] = convert (f[i+512]); 249 s16[6*i+2] = s16[6*i+3] = s16[6*i+4] = 0; 250 s16[6*i+5] = convert (f[i]); 251 #endif 252 } 253 break; 254 case A52_3F | A52_LFE: 255 for (i = 0; i < 256; i++) { 256 #if LIKEAC3DEC 257 s16[6*i] = convert (f[i+256]); 258 s16[6*i+2] = convert (f[i+768]); 259 s16[6*i+3] = s16[6*i+4] = 0; 260 s16[6*i+1] = convert (f[i+512]); 261 s16[6*i+5] = convert (f[i]); 262 #else 263 s16[6*i] = convert (f[i+256]); 264 s16[6*i+1] = convert (f[i+768]); 265 s16[6*i+2] = s16[6*i+3] = 0; 266 s16[6*i+4] = convert (f[i+512]); 267 s16[6*i+5] = convert (f[i]); 268 #endif 269 } 270 break; 271 case A52_2F2R | A52_LFE: 272 for (i = 0; i < 256; i++) { 273 #if LIKEAC3DEC 274 s16[6*i] = convert (f[i+256]); 275 s16[6*i+1] = 0; 276 s16[6*i+2] = convert (f[i+512]); 277 s16[6*i+3] = convert (f[i+768]); 278 s16[6*i+4] = convert (f[i+1024]); 279 s16[6*i+5] = convert 
(f[i]); 280 #else 281 s16[6*i] = convert (f[i+256]); 282 s16[6*i+1] = convert (f[i+512]); 283 s16[6*i+2] = convert (f[i+768]); 284 s16[6*i+3] = convert (f[i+1024]); 285 s16[6*i+4] = 0; 286 s16[6*i+5] = convert (f[i]); 287 #endif 288 } 289 break; 290 case A52_3F2R | A52_LFE: 291 for (i = 0; i < 256; i++) { 292 #if LIKEAC3DEC 293 s16[6*i] = convert (f[i+256]); 294 s16[6*i+1] = convert (f[i+512]); 295 s16[6*i+2] = convert (f[i+768]); 296 s16[6*i+3] = convert (f[i+1024]); 297 s16[6*i+4] = convert (f[i+1280]); 298 s16[6*i+5] = convert (f[i]); 299 #else 300 s16[6*i] = convert (f[i+256]); 301 s16[6*i+1] = convert (f[i+768]); 302 s16[6*i+2] = convert (f[i+1024]); 303 s16[6*i+3] = convert (f[i+1280]); 304 s16[6*i+4] = convert (f[i+512]); 305 s16[6*i+5] = convert (f[i]); 306 #endif 307 } 308 break; 309 } 310 } 311 137 312 /**** end */ 138 313 139 314 #define HEADER_SIZE 7 … … 179 354 s->channels = ac3_channels[s->flags & 7]; 180 355 if (s->flags & A52_LFE) 181 356 s->channels++; 357 if (avctx->request_channels > 0) 358 { 359 avctx->channels = s->channels; 360 if (s->channels > avctx->channels) 361 avctx->channels = avctx->request_channels; 362 } 182 363 if (avctx->channels == 0) 183 364 /* No specific number of channel requested */ 184 365 avctx->channels = s->channels; … … 199 380 s->inbuf_ptr += len; 200 381 buf_size -= len; 201 382 } else { 383 int chans; 202 384 flags = s->flags; 203 385 if (avctx->channels == 1) 204 386 flags = A52_MONO; 205 else if (avctx->channels == 2) 206 flags = A52_STEREO; 387 else if (avctx->channels == 2) { 388 if (s->channels>2) 389 flags = A52_DOLBY; 390 else 391 flags = A52_STEREO; 392 } 207 393 else 208 394 flags |= A52_ADJUST_LEVEL; 209 395 level = 1; 396 chans = channels_multi(flags); 210 397 if (s->a52_frame(s->state, s->inbuf, &flags, &level, 384)) { 211 398 fail: 212 399 av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n"); … … 217 404 for (i = 0; i < 6; i++) { 218 405 if (s->a52_block(s->state)) 219 406 goto fail; 220 float _to_int(s->samples, out_samples + i * 256 * avctx->channels, avctx->channels);407 float2s16_multi(s->samples, out_samples + i * 256 * chans, flags); 221 408 } 222 409 s->inbuf_ptr = s->inbuf; 223 410 s->frame_size = 0; -
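A note on the convert() helper added above: a52_frame() is called with level 1.0 and a bias of 384, so every sample liba52 hands back is a float in roughly [383, 385). In that range one IEEE-754 mantissa step is 2^-15, which is why reinterpreting the float's bits as an int32 (the (int32_t *) cast above) and subtracting 0x43c00000, the bit pattern of 384.0f, yields the sample already scaled to 16-bit PCM. A standalone demonstration (sketch, not part of the patch):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static int16_t bias_convert(float biased)      // biased = sample + 384.0f
    {
        int32_t bits;
        std::memcpy(&bits, &biased, sizeof bits);  // same reinterpretation as the int32_t* cast in liba52.c
        int32_t v = bits - 0x43c00000;             // == sample * 32768 while |sample| < 1
        if (v >  32767) v =  32767;                // av_clip_int16() equivalent
        if (v < -32768) v = -32768;
        return (int16_t) v;
    }

    int main()
    {
        std::printf("%d %d %d\n",
                    bias_convert(384.0f),          // 0
                    bias_convert(384.5f),          // 16384
                    bias_convert(383.75f));        // -8192
        return 0;
    }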
libs/libavcodec/ac3dec.c
1132 1132 1133 1133 /* channel config */ 1134 1134 ctx->out_channels = ctx->nchans; 1135 if (avctx->request_channels > 0) 1136 { 1137 avctx->channels = ctx->out_channels; 1138 if (avctx->channels > avctx->request_channels) 1139 avctx->channels = avctx->request_channels; 1140 } 1135 1141 if (avctx->channels == 0) { 1136 1142 avctx->channels = ctx->out_channels; 1137 1143 } else if(ctx->out_channels < avctx->channels) { -
libs/libavcodec/dca.c
1159 1159 avctx->bit_rate = s->bit_rate; 1160 1160 1161 1161 channels = s->prim_channels + !!s->lfe; 1162 avctx->channels = avctx->request_channels; 1162 //avctx->channels = avctx->request_channels; 1163 if (avctx->request_channels > 0) 1164 { 1165 avctx->channels = channels; 1166 if (avctx->channels > avctx->request_channels) 1167 avctx->channels = avctx->request_channels; 1168 } 1163 1169 if(avctx->channels == 0) { 1164 1170 avctx->channels = channels; 1165 1171 } else if(channels < avctx->channels) {
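For reference, the three lavc decoders touched above (liba52.c, ac3dec.c and dca.c) now apply the same channel-negotiation clamp. Factored out purely for illustration (no such helper exists in the patch):

    // sketch: the request_channels clamp common to liba52.c, ac3dec.c and dca.c
    static int negotiated_channels(int stream_channels, int request_channels)
    {
        if (request_channels > 0 && request_channels < stream_channels)
            return request_channels;   // caller asked for a downmix
        return stream_channels;        // a request of 0 means "whatever the stream carries"
    }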