Ticket #13446: 20190504_1902_audiotrack.patch

File 20190504_1902_audiotrack.patch, 27.7 KB (added by Peter Bennett, 7 years ago)

Add "AudioTrack" Audio playback capability for Android. Note that to build for android you need a change to the packaging repository as well

  • new file mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java

    diff --git a/mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java b/mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java
    new file mode 100644
    index 00000000000..31389ccf4bc
     1package org.mythtv.audio;
     2
     3import android.media.AudioTrack;
     4import android.media.AudioAttributes;
     5import android.media.AudioFormat;
     6import android.media.AudioManager;
     7import android.media.AudioTimestamp;
     8import java.nio.ByteBuffer;
     9
     10public class AudioOutputAudioTrack
     11{
     12    AudioTrack player;
     13    AudioTimestamp timestamp = new AudioTimestamp();
     14    long timelasttaken;
     15    int samplerate;
     16    long firstwritetime;
     17    int bufferedBytes;
     18    Object syncBuffer;
     19    int bufferSize;
     20    int channels;
     21
     22    public AudioOutputAudioTrack(int encoding, int sampleRate, int bufferSize, int channels)
     23    {
     24        syncBuffer = new Object();
     25        this.bufferSize = bufferSize;
     26        this.channels = channels;
     27        AudioAttributes.Builder aab = new AudioAttributes.Builder();
     28        aab.setUsage(AudioAttributes.USAGE_MEDIA);
     29        aab.setContentType(AudioAttributes.CONTENT_TYPE_MOVIE);
     30        AudioAttributes aa = aab.build();
     31
     32        AudioFormat.Builder afb = new AudioFormat.Builder();
     33        afb.setEncoding (encoding);
     34        afb.setSampleRate (sampleRate);
     35        int channelMask = 0;
     36        switch (channels)
     37        {
     38            case 8:
     39                channelMask |= AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
     40                // fall through
     41            case 6:
     42                channelMask |=  AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT
     43                    | AudioFormat.CHANNEL_OUT_FRONT_CENTER | AudioFormat.CHANNEL_OUT_LOW_FREQUENCY;
     44                // fall through
     45            case 2:
     46                channelMask |=  AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
     47                break;
     48            case 1:
     49                channelMask |=  AudioFormat.CHANNEL_OUT_FRONT_CENTER;
     50                break;
     51            default:
     52                // default treated as 2 channel (stereo)
     53                channelMask |=  AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
     54                break;
     55        }
     56        afb.setChannelMask(channelMask);
     57        AudioFormat af = afb.build();
     58        samplerate = sampleRate;
     59        int state = 0;
     60
     61        for (int i = 0; i < 10; i++)
     62        {
     63            player = new AudioTrack(aa, af, bufferSize,
     64                AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
     65            state = player.getState();
     66            if (state == AudioTrack.STATE_INITIALIZED)
     67                break;
     68            try
     69            {
     70                Thread.sleep(50);
     71            }
     72            catch (InterruptedException ex) { }
     73        }
     74
     75        player.play();
     76    }
     77
     78    public int write(byte[] audioData, int sizeInBytes)
     79    {
     80        if (player != null && player.getPlayState() != AudioTrack.PLAYSTATE_PLAYING)
     81            player.play();
     82        if (firstwritetime == 0)
     83            firstwritetime = System.nanoTime();
     84        ByteBuffer buf = ByteBuffer.wrap(audioData);
     85        int written = 0;
     86        int ret = 0;
     87        int i;
     88        if (player != null)
     89        {
     90            while (buf.hasRemaining())
     91            {
     92                ret = player.write(buf, buf.remaining(), AudioTrack.WRITE_NON_BLOCKING);
     93                if (ret < 0)
     94                {
     95                    // written = ret;
     96                    break;
     97                }
     98                written += ret;
     99                synchronized(syncBuffer)
     100                {
     101                    bufferedBytes = bufferSize - sizeInBytes + buf.remaining();
     102                }
     103                try
     104                {
     105                    Thread.sleep(10);
     106                }
     107                catch (InterruptedException ex) {}
     108            }
     109        }
     110        else
     111            written = AudioTrack.ERROR;
     112        synchronized(syncBuffer)
     113        {
     114            bufferedBytes = bufferSize;
     115        }
     116        return written;
     117    }
     118
     119    public int getBufferedBytes ()
     120    {
     121        int ret;
     122        synchronized(syncBuffer)
     123        {
     124            ret = bufferedBytes;
     125        }
     126        return ret;
     127    }
     128
     129    // Get playback position in frames.
     130    public int getPlaybackHeadPosition ()
     131    {
     132        if (player != null)
     133        {
     134            long currentTime = System.nanoTime();
     135            // refresh the timestamp at most every 10 seconds (every 1 second until 48000 frames have played)
     136            if (currentTime - timelasttaken > 10000000000L
     137               || (currentTime - timelasttaken > 1000000000L
     138                   && timestamp.framePosition < 48000))
     139            {
     140                if (!player.getTimestamp(timestamp))
     141                    return 0;
     142                if (timestamp.framePosition < 0)
     143                    timestamp.framePosition &= 0x00000000ffffffff;
     144                timelasttaken = currentTime;
     145            }
     146            if (timestamp.framePosition < 48000)
     147                return 0;
     148            return (int) (timestamp.framePosition +
     149                (currentTime - timestamp.nanoTime)  * samplerate / 1000000000L);
     150        }
     151        else
     152            return 0;
     153    }
     154
     155    // Get latency in milliseconds
     156    // Only correct if there has been no pause
     157    // and no starving
     158    public int getLatency ()
     159    {
     160        if (player != null && firstwritetime != 0)
     161        {
     162            long currentTime = System.nanoTime();
     163            // only run getTimestamp once every 1 second
     164            if (currentTime - timelasttaken > 1000000000L)
     165            {
     166                if (!player.getTimestamp(timestamp))
     167                    return -1;
     168                if (timestamp.framePosition < 0)
     169                    timestamp.framePosition &= 0x00000000ffffffff;
     170                timelasttaken = currentTime;
     171            }
     172            if (timestamp.framePosition < 1000)
     173                return -1;
     174            long playMillisec = timestamp.framePosition * 1000 / samplerate;
     175            long elapsedMillisec = (timestamp.nanoTime - firstwritetime)/1000000L;
     176            return (int) (elapsedMillisec - playMillisec);
     177        }
     178        else
     179            return -1;
     180    }
     181
     182
     183    public void pause (boolean doPause)
     184    {
     185        if (player == null)
     186            return;
     187
     188        if (doPause)
     189        {
     190            if (player.getPlayState() == AudioTrack.PLAYSTATE_PLAYING)
     191                player.pause();
     192        }
     193        else
     194        {
     195            if (player.getPlayState() != AudioTrack.PLAYSTATE_PLAYING)
     196                player.play();
     197        }
     198    }
     199
     200    public void release ()
     201    {
     202        if (player != null)
     203            player.release();
     204        player = null;
     205    }
     206
     207}
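
    getPlaybackHeadPosition() above extrapolates between getTimestamp() refreshes: the last reported frame position plus the frames that should have played in the time since that timestamp was taken. A worked example of the arithmetic (values illustrative):

        public class PositionSketch
        {
            public static void main(String[] args)
            {
                // Last AudioTimestamp: framePosition 96000, taken 0.5 s ago, 48 kHz stream.
                long framePosition = 96000L;
                long elapsedNanos  = 500000000L;   // currentTime - timestamp.nanoTime
                int  samplerate    = 48000;
                long estimated = framePosition + elapsedNanos * samplerate / 1000000000L;
                System.out.println(estimated);     // 96000 + 24000 = 120000 frames
            }
        }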
  • mythtv/libs/libmyth/audio/audiooutput.cpp

    diff --git a/mythtv/libs/libmyth/audio/audiooutput.cpp b/mythtv/libs/libmyth/audio/audiooutput.cpp
    index 73f8259bda7..54f1edacea7 100644
    a b using namespace std;  
    3838#endif
    3939#ifdef Q_OS_ANDROID
    4040#include "audiooutputopensles.h"
     41#include "audiooutputaudiotrack.h"
    4142#endif
    4243#ifdef USING_OPENMAX
    4344#include "audiooutput_omx.h"
    AudioOutput *AudioOutput::OpenAudio(AudioSettings &settings,  
    206207        LOG(VB_GENERAL, LOG_ERR, "Audio output device is set to a OpenSLES "
    207208                                 "device but Android support is not compiled "
    208209                                 "in!");
     210#endif
     211    }
     212    else if (main_device.startsWith("AudioTrack:"))
     213    {
     214#ifdef Q_OS_ANDROID
     215        ret = new AudioOutputAudioTrack(settings);
     216#else
     217        LOG(VB_GENERAL, LOG_ERR, "Audio output device is set to AudioTrack "
     218                                 "device but Android support is not compiled "
     219                                 "in!");
    209220#endif
    210221    }
    211222    else if (main_device.startsWith("OpenMAX:"))
    AudioOutput::ADCVect* AudioOutput::GetOutputList(void)  
    564575#ifdef ANDROID
    565576    {
    566577        QString name = "OpenSLES:";
    567         QString desc =  tr("OpenSLES default output.");
     578        QString desc =  tr("OpenSLES default output. Stereo support only.");
     579        adc = GetAudioDeviceConfig(name, desc);
     580        if (adc)
     581        {
     582            list->append(*adc);
     583            delete adc;
     584        }
     585    }
     586    {
     587        QString name = "AudioTrack:";
     588        QString desc =  tr("Android AudioTrack output. Supports surround sound.");
    568589        adc = GetAudioDeviceConfig(name, desc);
    569590        if (adc)
    570591        {
  • mythtv/libs/libmyth/audio/audiooutput_omx.cpp

    diff --git a/mythtv/libs/libmyth/audio/audiooutput_omx.cpp b/mythtv/libs/libmyth/audio/audiooutput_omx.cpp
    index d79b786053d..fa60d255ca5 100644
    a b int AudioOutputOMX::GetBufferedOnSoundcard(void) const  
    576576    }
    577577
    578578#ifdef USING_BROADCOM
     579    // output bits per 10 frames
     580    int obpf;
     581    if (m_passthru && !usesSpdif())
     582        obpf = m_source_bitrate * 10 / m_source_samplerate;
     583    else
     584        obpf = m_output_bytes_per_frame * 80;
     585
    579586    OMX_PARAM_U32TYPE u;
    580587    OMX_DATA_INIT(u);
    581588    u.nPortIndex = m_audiorender.Base();
    int AudioOutputOMX::GetBufferedOnSoundcard(void) const  
    586593            "GetConfig AudioRenderingLatency error %1").arg(Error2String(e)));
    587594        return 0;
    588595    }
    589     return u.nU32 * m_output_bytes_per_frame;
     596    return u.nU32 * obpf / 80;
    590597#else
    591598    return m_pending;
    592599#endif
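
    The change above converts the buffered frame count reported by OMX (u.nU32) into bytes using the same bits-per-10-frames scaling introduced elsewhere in this patch. A sketch of the conversion with illustrative values:

        public class OmxBufferSketch
        {
            public static void main(String[] args)
            {
                long frames = 4800;                      // u.nU32: frames buffered on the renderer
                int obpfPcm = 4 * 80;                    // 16-bit stereo PCM: 320 bits per 10 frames
                int obpfPassthru = 384000 * 10 / 48000;  // AC-3 passthrough at 384 kbit/s, 48 kHz: 80
                System.out.println(frames * obpfPcm / 80);       // 19200 bytes
                System.out.println(frames * obpfPassthru / 80);  // 4800 bytes
            }
        }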
  • new file mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp

    diff --git a/mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp
    new file mode 100644
    index 00000000000..95f533834aa
     1
     2#include "config.h"
     3
     4using namespace std;
     5
     6#include <QAndroidJniObject>
     7#include <QAndroidJniEnvironment>
     8
     9#include "mythlogging.h"
     10#include "audiooutputaudiotrack.h"
     11
     12#define CHANNELS_MIN 1
     13#define CHANNELS_MAX 8
     14
     15#define ANDROID_EXCEPTION_CHECK \
     16  if (env->ExceptionCheck()) { \
     17    env->ExceptionDescribe(); \
     18    env->ExceptionClear(); \
     19    exception=true; \
     20  } else \
     21    exception=false;
     22// clear exception without checking
     23#define ANDROID_EXCEPTION_CLEAR \
     24  if (env->ExceptionCheck()) { \
     25    env->ExceptionDescribe(); \
     26    env->ExceptionClear(); \
     27  }
     28
     29#define LOC QString("AudioTrack: ")
     30
     31// Constants from Android Java API
     32// class android.media.AudioFormat
     33#define AF_CHANNEL_OUT_MONO 4
     34#define AF_CHANNEL_OUT_STEREO 12
     35#define AF_CHANNEL_OUT_SURROUND 1052
     36#define AF_ENCODING_AC3 5
     37#define AF_ENCODING_E_AC3 6
     38#define AF_ENCODING_DTS 7
     39#define AF_ENCODING_DOLBY_TRUEHD 14
     40#define AF_ENCODING_PCM_8BIT 3
     41#define AF_ENCODING_PCM_16BIT 2
     42#define AF_ENCODING_PCM_FLOAT 4
     43
     44// for debugging
     45#include <android/log.h>
     46
     47AudioOutputAudioTrack::AudioOutputAudioTrack(const AudioSettings &settings) :
     48    AudioOutputBase(settings)
     49{
     50    InitSettings(settings);
     51    if (settings.m_init)
     52        Reconfigure(settings);
     53}
     54
     55AudioOutputAudioTrack::~AudioOutputAudioTrack()
     56{
     57    KillAudio();
     58    CloseDevice();
     59}
     60
     61bool AudioOutputAudioTrack::OpenDevice()
     62{
     63    bool exception=false;
     64    QAndroidJniEnvironment env;
     65    jint encoding = 0;
     66    jint sampleRate = m_samplerate;
     67
     68    // obpf = output bits per 10 frames
     69    int obpf = m_output_bytes_per_frame * 80;
     70
     71    if (m_passthru || m_enc)
     72        obpf = m_source_bitrate * 10 / m_source_samplerate;
     73
     74    // 50 milliseconds
     75    m_fragment_size = obpf * m_source_samplerate * 5 / 8000;
     76
     77    if (m_fragment_size < 1536)
     78        m_fragment_size = 1536;
     79
     80
     81    if (m_passthru || m_enc)
     82    {
     83        switch (m_codec)
     84        {
     85            case AV_CODEC_ID_AC3:
     86                encoding = AF_ENCODING_AC3;
     87                break;
     88            case AV_CODEC_ID_DTS:
     89                encoding = AF_ENCODING_DTS;
     90                break;
     91            case AV_CODEC_ID_EAC3:
     92                encoding = AF_ENCODING_E_AC3;
     93                break;
     94            case AV_CODEC_ID_TRUEHD:
     95                encoding = AF_ENCODING_DOLBY_TRUEHD;
     96                break;
     97
     98            default:
     99                LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" No support for audio passthru encoding %1").arg(m_codec));
     100                return false;
     101        }
     102    }
     103    else
     104    {
     105        switch (m_output_format)
     106        {
     107            case FORMAT_U8:
     108                // This could be used to get the value from Java instead of having these constants in our header file.
     109                // encoding = QAndroidJniObject::getStaticField<jint>
     110                //   ("android.media.AudioFormat","ENCODING_PCM_8BIT");
     111                encoding = AF_ENCODING_PCM_8BIT;
     112                break;
     113            case FORMAT_S16:
     114                encoding = AF_ENCODING_PCM_16BIT;
     115                break;
     116            case FORMAT_FLT:
     117                encoding = AF_ENCODING_PCM_FLOAT;
     118                break;
     119            default:
     120                LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" No support for audio format %1").arg(m_output_format));
     121                return false;
     122        }
     123    }
     124
     125    jint minBufferSize = m_fragment_size * 4;
     126    m_soundcard_buffer_size = minBufferSize;
     127    jint channels = m_channels;
     128
     129    m_bytesWritten = 0;
     130    m_startTimeCode = 0;
     131    m_audioTrack = new QAndroidJniObject("org/mythtv/audio/AudioOutputAudioTrack",
     132        "(IIII)V", encoding, sampleRate, minBufferSize, channels);
     133    ANDROID_EXCEPTION_CHECK
     134
     135    if (exception)
     136    {
     137        LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" Java Exception when creating AudioTrack"));
     138        m_audioTrack = nullptr;
     139        return false;
     140    }
     141    return true;
     142}
     143
     144void AudioOutputAudioTrack::CloseDevice()
     145{
     146    QAndroidJniEnvironment env;
     147    if (m_audioTrack)
     148    {
     149        m_audioTrack->callMethod<void>("release");
     150        ANDROID_EXCEPTION_CLEAR
     151        delete m_audioTrack;
     152        m_audioTrack = nullptr;
     153    }
     154}
     155
     156AudioOutputSettings* AudioOutputAudioTrack::GetOutputSettings(bool /* digital */)
     157{
     158    bool exception=false;
     159    QAndroidJniEnvironment env;
     160    jint bufsize = 0;
     161
     162    AudioOutputSettings *settings = new AudioOutputSettings();
     163
     164    int supportedrate = 0;
     165    while (int rate = settings->GetNextRate())
     166    {
     167        // Checking for valid rates using getMinBufferSize.
     168        // See https://stackoverflow.com/questions/8043387/android-audiorecord-supported-sampling-rates/22317382
     169        bufsize = QAndroidJniObject::callStaticMethod<jint>
     170            ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
     171             rate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_16BIT);
     172        ANDROID_EXCEPTION_CHECK
     173        if (bufsize > 0 && !exception)
     174        {
     175            settings->AddSupportedRate(rate);
     176            // save any supported rate for later
     177            supportedrate = rate;
     178        }
     179    }
     180
     181    // Checking for valid format using getMinBufferSize.
     182    bufsize = QAndroidJniObject::callStaticMethod<jint>
     183        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
     184            supportedrate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_8BIT);
     185    ANDROID_EXCEPTION_CHECK
     186    if (bufsize > 0 && !exception)
     187        settings->AddSupportedFormat(FORMAT_U8);
     188    // 16bit always supported
     189    settings->AddSupportedFormat(FORMAT_S16);
     190
     191    bufsize = QAndroidJniObject::callStaticMethod<jint>
     192        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
     193            supportedrate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_FLOAT);
     194    ANDROID_EXCEPTION_CHECK
     195    if (bufsize > 0 && !exception)
     196        settings->AddSupportedFormat(FORMAT_FLT);
     197
     198    for (uint channels = CHANNELS_MIN; channels <= CHANNELS_MAX; channels++)
     199    {
     200        settings->AddSupportedChannels(channels);
     201    }
     202    settings->setPassthrough(0);
     203
     204    return settings;
     205}
     206
     207void AudioOutputAudioTrack::WriteAudio(unsigned char* aubuf, int size)
     208{
     209    bool exception=false;
     210    QAndroidJniEnvironment env;
     211    if (m_actually_paused)
     212    {
     213        jboolean param = true;
     214        if (m_audioTrack) m_audioTrack->callMethod<void>("pause","(Z)V",param);
     215        ANDROID_EXCEPTION_CLEAR
     216        return;
     217    }
     218    // create a java byte array
     219    jbyteArray arr = env->NewByteArray(size);
     220    env->SetByteArrayRegion(arr, 0, size, reinterpret_cast<jbyte*>(aubuf));
     221    jint ret = -99;
     222    if (m_audioTrack)
     223    {
     224        ret = m_audioTrack->callMethod<jint>("write","([BI)I", arr, size);
     225        ANDROID_EXCEPTION_CHECK
     226    }
     227    env->DeleteLocalRef(arr);
     228    if (ret != size || exception)
     229        LOG(VB_GENERAL, LOG_ERR, LOC + __func__
     230          + QString(" Audio Write failed, size %1 return %2 exception %3")
     231          .arg(size).arg(ret).arg(exception));
     232
     233    if (ret > 0)
     234        m_bytesWritten += ret;
     235
     236    LOG(VB_AUDIO | VB_TIMESTAMP, LOG_INFO, LOC + __func__
     237        + QString(" WriteAudio size=%1 written=%2")
     238        .arg(size).arg(ret));
     239}
     240
     241
     242int AudioOutputAudioTrack::GetBufferedOnSoundcard(void) const
     243{
     244    bool exception=false;
     245    QAndroidJniEnvironment env;
     246    int buffered (0);
     247    if (m_audioTrack)
     248    {
     249        buffered
     250            = m_audioTrack->callMethod<jint>("getBufferedBytes");
     251        ANDROID_EXCEPTION_CHECK
     252        if (exception)
     253            buffered = 0;
     254    }
     255
     256    return buffered;
     257}
     258
     259bool AudioOutputAudioTrack::AddData(void *in_buffer, int in_len,
     260                              int64_t timecode, int in_frames)
     261{
     262    bool ret = AudioOutputBase::AddData
     263        (in_buffer, in_len, timecode, in_frames);
     264
     265    if (m_startTimeCode == 0)
     266        m_startTimeCode = GetBaseAudBufTimeCode();
     267
     268    return ret;
     269}
     270
     271void AudioOutputAudioTrack::Pause(bool paused)
     272{
     273    AudioOutputBase::Pause(paused);
     274    jboolean param = paused;
     275    if (m_audioTrack) m_audioTrack->callMethod<void>("pause","(Z)V",param);
     276}
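
    GetOutputSettings() above probes device capabilities with AudioTrack.getMinBufferSize(), which returns a positive buffer size for a workable parameter combination and an error code otherwise (the technique from the Stack Overflow link in the code). The same probe written directly in Java, as a sketch:

        import android.media.AudioFormat;
        import android.media.AudioTrack;

        public class ProbeSketch
        {
            // A positive result means the rate/format combination is usable;
            // AudioTrack.ERROR_BAD_VALUE or AudioTrack.ERROR means it is not.
            public static boolean rateSupported(int rate)
            {
                int bufsize = AudioTrack.getMinBufferSize(rate,
                        AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
                return bufsize > 0;
            }
        }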
  • new file mythtv/libs/libmyth/audio/audiooutputaudiotrack.h

    diff --git a/mythtv/libs/libmyth/audio/audiooutputaudiotrack.h b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.h
    new file mode 100644
    index 00000000000..2279ec6a3b1
     1#ifndef _AUDIOOUTPUTAUDIOTRACK_H_
     2#define _AUDIOOUTPUTAUDIOTRACK_H_
     3
     4#include "audiooutputbase.h"
     5
     6class QAndroidJniObject;
     7/*
     8
     9    Audio output for android based on android.media.AudioTrack.
     10
     11    This uses the java class org.mythtv.audio.AudioOutputAudioTrack
     12    to invoke android media playback methods.
     13
     14*/
     15
     16class AudioOutputAudioTrack : public AudioOutputBase
     17{
     18  public:
     19    explicit AudioOutputAudioTrack(const AudioSettings &settings);
     20    ~AudioOutputAudioTrack() override;
     21
     22    bool AddData(void *buffer, int len, int64_t timecode, int frames) override; // AudioOutput
     23
     24    // Volume control
     25    int GetVolumeChannel(int /* channel */) const override // VolumeBase
     26        { return 100; }
     27    void SetVolumeChannel(int /* channel */, int /* volume */) override // VolumeBase
     28        {}
     29    void Pause(bool paused) override; // AudioOutput
     30
     31  protected:
     32    bool OpenDevice(void) override; // AudioOutputBase
     33    void CloseDevice(void) override; // AudioOutputBase
     34    void WriteAudio(unsigned char *aubuf, int size) override; // AudioOutputBase
     35    int  GetBufferedOnSoundcard(void) const override; // AudioOutputBase
     36    AudioOutputSettings* GetOutputSettings(bool digital) override; // AudioOutputBase
     37    QAndroidJniObject *m_audioTrack {nullptr};
     38    uint32_t m_bytesWritten {0};
     39    int64_t m_startTimeCode {0};
     40};
     41
     42#endif //_AUDIOOUTPUTAUDIOTRACK_H_
  • mythtv/libs/libmyth/audio/audiooutputbase.cpp

    diff --git a/mythtv/libs/libmyth/audio/audiooutputbase.cpp b/mythtv/libs/libmyth/audio/audiooutputbase.cpp
    index 1200bc2106c..1e07886c6b4 100644
    a b using namespace std;  
    2424#include "mythlogging.h"
    2525#include "mythconfig.h"
    2626
     27// AC3 encode currently disabled for Android
     28#if defined(Q_OS_ANDROID)
     29#define DISABLE_AC3_ENCODE
     30#endif
     31
    2732#define LOC QString("AOBase: ")
    2833
    2934#define WPOS (m_audiobuffer + org_waud)
    AudioOutputBase::AudioOutputBase(const AudioSettings &settings) :  
    6166    memset(m_src_in_buf,         0, sizeof(m_src_in_buf));
    6267    memset(m_audiobuffer,        0, sizeof(m_audiobuffer));
    6368
     69    if (m_main_device.startsWith("OpenMAX:")
     70        || m_main_device.startsWith("AudioTrack:"))
     71        m_usesSpdif = false;
    6472    // Handle override of SRC quality settings
    6573    if (gCoreContext->GetBoolSetting("SRCQualityOverride", false))
    6674    {
    void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor)  
    313321        m_pSoundStretch->setSampleRate(m_samplerate);
    314322        m_pSoundStretch->setChannels(channels);
    315323        m_pSoundStretch->setTempo(m_stretchfactor);
    316 #if ARCH_ARM
     324#if ARCH_ARM || defined(Q_OS_ANDROID)
    317325        // use less demanding settings for Raspberry pi
    318326        m_pSoundStretch->setSetting(SETTING_SEQUENCE_MS, 82);
    319327        m_pSoundStretch->setSetting(SETTING_USE_AA_FILTER, 0);
    bool AudioOutputBase::SetupPassthrough(AVCodecID codec, int codec_profile,  
    415423
    416424    delete m_spdifenc;
    417425
    418     // No spdif encoder if using openmax audio
    419     if (m_main_device.startsWith("OpenMAX:"))
    420         m_spdifenc = nullptr;
    421     else
     426    // No spdif encoder needed for certain devices
     427    if (m_usesSpdif)
    422428        m_spdifenc = new SPDIFEncoder("spdif", codec);
     429    else
     430        m_spdifenc = nullptr;
    423431    if (m_spdifenc && m_spdifenc->Succeeded() && codec == AV_CODEC_ID_DTS)
    424432    {
    425433        switch(codec_profile)
    void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)  
    476484            m_output_settings->IsSupportedChannels(lconfigured_channels);
    477485
    478486        // check if the number of channels could be transmitted via AC3 encoding
     487#ifndef DISABLE_AC3_ENCODE
    479488        lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
    480489            (!m_output_settings->canFeature(FEATURE_LPCM) &&
    481490             lconfigured_channels > 2 && lconfigured_channels <= 6);
    482 
     491#endif
    483492        if (!lenc && !cando_channels)
    484493        {
    485494            // if hardware doesn't support source audio configuration
    void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)  
    517526           and we have more than 2 channels but multichannel PCM is not
    518527           supported or if the device just doesn't support the number of
    519528           channels */
     529#ifndef DISABLE_AC3_ENCODE
    520530        lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
    521531            ((!m_output_settings->canFeature(FEATURE_LPCM) &&
    522532              lconfigured_channels > 2) ||
    523533             !m_output_settings->IsSupportedChannels(lconfigured_channels));
    524 
    525534        /* Might we reencode a bitstream that's been decoded for timestretch?
    526535           If the device doesn't support the number of channels - see below */
    527536        if (m_output_settingsdigital->canFeature(FEATURE_AC3) &&
    void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)  
    530539        {
    531540            lreenc = true;
    532541        }
    533 
     542#endif
    534543        // Enough channels? Upmix if not, but only from mono/stereo/5.0 to 5.1
    535544        if (IS_VALID_UPMIX_CHANNEL(settings.m_channels) &&
    536545            settings.m_channels < lconfigured_channels)
    void AudioOutputBase::SetEffDsp(int dsprate)  
    968977/**
    969978 * Get the number of bytes in the audiobuffer
    970979 */
    971 inline int AudioOutputBase::audiolen()
     980inline int AudioOutputBase::audiolen() const
    972981{
    973982    if (m_waud >= m_raud)
    974983        return m_waud - m_raud;
    inline int AudioOutputBase::audiolen()  
    978987/**
    979988 * Get the free space in the audiobuffer in bytes
    980989 */
    981 int AudioOutputBase::audiofree()
     990int AudioOutputBase::audiofree() const
    982991{
    983992    return kAudioRingBufferSize - audiolen() - 1;
    984993    /* There is one wasted byte in the buffer. The case where waud = raud is
    int AudioOutputBase::audiofree()  
    9931002 * This value can differ from that returned by audiolen if samples are
    9941003 * being converted to floats and the output sample format is not 32 bits
    9951004 */
    996 int AudioOutputBase::audioready()
     1005int AudioOutputBase::audioready() const
    9971006{
    9981007    if (m_passthru || m_enc || m_bytes_per_frame == m_output_bytes_per_frame)
    9991008        return audiolen();
    int64_t AudioOutputBase::GetAudiotime(void)  
    10081017    if (m_audbuf_timecode == 0 || !m_configure_succeeded)
    10091018        return 0;
    10101019
    1011     int obpf = m_output_bytes_per_frame;
     1020    // output bits per 10 frames
     1021    int64_t obpf;
     1022
     1023    if (m_passthru && !usesSpdif())
     1024        obpf = m_source_bitrate * 10 / m_source_samplerate;
     1025    else
     1026    if (m_enc && !usesSpdif())
     1027    {
     1028        // re-encode bitrate is hardcoded at 448000
     1029        obpf = 448000 * 10 / m_source_samplerate;
     1030    }
     1031    else
     1032        obpf = m_output_bytes_per_frame * 80;
     1033
    10121034    int64_t oldaudiotime;
    10131035
    10141036    /* We want to calculate 'audiotime', which is the timestamp of the audio
    int64_t AudioOutputBase::GetAudiotime(void)  
    10291051
    10301052    QMutexLocker lockav(&m_avsync_lock);
    10311053
    1032     int soundcard_buffer = GetBufferedOnSoundcard(); // bytes
     1054    int64_t soundcard_buffer = GetBufferedOnSoundcard(); // bytes
    10331055
    10341056    /* audioready tells us how many bytes are in audiobuffer
    10351057       scaled appropriately if output format != internal format */
    1036     int main_buffer = audioready();
     1058    int64_t main_buffer = audioready();
    10371059
    10381060    oldaudiotime = m_audiotime;
    10391061
    int64_t AudioOutputBase::GetAudiotime(void)  
    10411063       of major post-stretched buffer contents
    10421064       processing latencies are catered for in AddData/SetAudiotime
    10431065       to eliminate race */
    1044     m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ? (
    1045         ((int64_t)(main_buffer + soundcard_buffer) * m_eff_stretchfactor) /
    1046         (m_effdsp * obpf)) : 0);
     1066
     1067    m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ?
     1068        ((main_buffer + soundcard_buffer) * int64_t(m_eff_stretchfactor)
     1069        * 80 / int64_t(m_effdsp) / obpf) : 0);
    10471070
    10481071    /* audiotime should never go backwards, but we might get a negative
    10491072       value if GetBufferedOnSoundcard() isn't updated by the driver very
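
    In the reworked GetAudiotime() above, keeping obpf in bits per 10 frames lets one formula cover PCM, passthrough, and re-encoded output. A worked conversion from buffered bytes to milliseconds (a sketch with illustrative values; the m_effdsp and m_eff_stretchfactor scalings are assumptions based on surrounding MythTV code, not shown in this patch):

        public class AudiotimeSketch
        {
            public static void main(String[] args)
            {
                // Assumed scalings: m_effdsp = sample rate * 100,
                // m_eff_stretchfactor = stretch factor * 100000.
                int bufferedBytes = 38400;     // main_buffer + soundcard_buffer
                int obpf = 4 * 80;             // 16-bit stereo PCM: 320 bits per 10 frames
                long effdsp = 48000L * 100;    // 48 kHz
                long effStretch = 100000;      // 1.0x playback speed
                long ms = bufferedBytes * effStretch * 80 / effdsp / obpf;
                System.out.println(ms);        // 200 ms of audio still buffered
            }
        }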
  • mythtv/libs/libmyth/audio/audiooutputbase.h

    diff --git a/mythtv/libs/libmyth/audio/audiooutputbase.h b/mythtv/libs/libmyth/audio/audiooutputbase.h
    index ea830b43585..b2a44e80403 100644
    a b class AudioOutputBase : public AudioOutput, public MThread  
    155155
    156156    int CheckFreeSpace(int &frames);
    157157
    158     inline int audiolen(); // number of valid bytes in audio buffer
    159     int audiofree();       // number of free bytes in audio buffer
    160     int audioready();      // number of bytes ready to be written
     158    inline int audiolen() const; // number of valid bytes in audio buffer
     159    int audiofree() const;       // number of free bytes in audio buffer
     160    int audioready() const;      // number of bytes ready to be written
    161161
    162162    void SetStretchFactorLocked(float factor);
    163163
    164164    // For audiooutputca
    165165    int GetBaseAudBufTimeCode() const { return m_audbuf_timecode; }
    166166
     167    bool usesSpdif() const { return m_usesSpdif; }
     168
    167169  protected:
    168170    // Basic details about the audio stream
    169171    int               m_channels                   {-1};
    class AudioOutputBase : public AudioOutput, public MThread  
    295297    int64_t           m_length_last_data                  {0};
    296298
    297299    // SPDIF Encoder for digital passthrough
     300    bool              m_usesSpdif                         {true};
    298301    SPDIFEncoder     *m_spdifenc                          {nullptr};
    299302
    300303    // Flag indicating if SetStretchFactor enabled audio float processing
  • mythtv/libs/libmyth/libmyth.pro

    diff --git a/mythtv/libs/libmyth/libmyth.pro b/mythtv/libs/libmyth/libmyth.pro
    index bb5f4a9e200..3c2de90068e 100644
    a b unix:!cygwin {  
    179179
    180180android {
    181181SOURCES += audio/audiooutputopensles.cpp
     182SOURCES += audio/audiooutputaudiotrack.cpp
    182183HEADERS += audio/audiooutputopensles.h
     184HEADERS += audio/audiooutputaudiotrack.h
    183185}
    184186
    185187linux:DEFINES += linux