diff --git a/mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java b/mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java
new file mode 100644
index 00000000000..31389ccf4bc
--- /dev/null
+++ b/mythtv/android-package-source/src/org/mythtv/audio/AudioOutputAudioTrack.java
@@ -0,0 +1,207 @@
+package org.mythtv.audio;
+
+import android.media.AudioTrack;
+import android.media.AudioAttributes;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTimestamp;
+import java.nio.ByteBuffer;
+
+public class AudioOutputAudioTrack
+{
+    AudioTrack player;
+    AudioTimestamp timestamp = new AudioTimestamp();
+    long timelasttaken;
+    int samplerate;
+    long firstwritetime;
+    int bufferedBytes;
+    Object syncBuffer;
+    int bufferSize;
+    int channels;
+
+    public AudioOutputAudioTrack(int encoding, int sampleRate, int bufferSize, int channels)
+    {
+        syncBuffer = new Object();
+        this.bufferSize = bufferSize;
+        this.channels = channels;
+        AudioAttributes.Builder aab = new AudioAttributes.Builder();
+        aab.setUsage(AudioAttributes.USAGE_MEDIA);
+        aab.setContentType(AudioAttributes.CONTENT_TYPE_MOVIE);
+        AudioAttributes aa = aab.build();
+
+        AudioFormat.Builder afb = new AudioFormat.Builder();
+        afb.setEncoding (encoding);
+        afb.setSampleRate (sampleRate);
+        int channelMask = 0;
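+        // Build the mask by falling through: 8 channels add the back
+        // pair; 6 or 8 add the side pair, centre and LFE; 2 or more
+        // end with the front pair; mono uses the front centre alone.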
+        switch (channels)
+        {
+            case 8:
+                channelMask |= AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
+                // fall through
+            case 6:
+                channelMask |=  AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT
+                    | AudioFormat.CHANNEL_OUT_FRONT_CENTER | AudioFormat.CHANNEL_OUT_LOW_FREQUENCY;
+                // fall through
+            case 2:
+                channelMask |=  AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
+                break;
+            case 1:
+                channelMask |=  AudioFormat.CHANNEL_OUT_FRONT_CENTER;
+                break;
+            default:
+                // default treated as 2 channel (stereo)
+                channelMask |=  AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
+                break;
+        }
+        afb.setChannelMask(channelMask);
+        AudioFormat af = afb.build();
+        samplerate = sampleRate;
+        int state = 0;
+
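+        // AudioTrack creation can fail transiently; retry for up to
+        // ~500 ms until it reports STATE_INITIALIZED.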
+        for (int i = 0; i < 10; i++)
+        {
+            player = new AudioTrack(aa, af, bufferSize,
+                AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
+            state = player.getState();
+            if (state == AudioTrack.STATE_INITIALIZED)
+                break;
+            try
+            {
+                Thread.sleep(50);
+            }
+            catch (InterruptedException ex) { }
+        }
+
+        player.play();
+    }
+
+    public int write(byte[] audioData, int sizeInBytes)
+    {
+        ByteBuffer buf = ByteBuffer.wrap(audioData);
+        int written = 0;
+        int ret = 0;
+        if (player != null)
+        {
+            if (player.getPlayState() != AudioTrack.PLAYSTATE_PLAYING)
+                player.play();
+            if (firstwritetime == 0)
+                firstwritetime = System.nanoTime();
+            while (buf.hasRemaining())
+            {
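+                // Non-blocking writes: if the device buffer is full,
+                // sleep briefly and retry until the whole chunk is queued.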
+                ret = player.write(buf, buf.remaining(), AudioTrack.WRITE_NON_BLOCKING);
+                if (ret < 0)
+                    break;
+                written += ret;
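+                // rough estimate of the bytes currently queued on the device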
+                synchronized(syncBuffer)
+                {
+                    bufferedBytes = bufferSize - sizeInBytes + buf.remaining();
+                }
+                try
+                {
+                    Thread.sleep(10);
+                }
+                catch (InterruptedException ex) {}
+            }
+        }
+        else
+            written = AudioTrack.ERROR;
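+        // after the write completes, assume the device buffer is full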
+        synchronized(syncBuffer)
+        {
+            bufferedBytes = bufferSize;
+        }
+        return written;
+    }
+
+    public int getBufferedBytes ()
+    {
+        int ret;
+        synchronized(syncBuffer)
+        {
+            ret = bufferedBytes;
+        }
+        return ret;
+    }
+
+    // Get playback position in frames.
+    public int getPlaybackHeadPosition ()
+    {
+        if (player != null)
+        {
+            long currentTime = System.nanoTime();
+            // refresh the timestamp every 10 seconds, or every second
+            // until the first 48000 frames have been played
+            if (currentTime - timelasttaken > 10000000000L
+               || (currentTime - timelasttaken > 1000000000L
+                   && timestamp.framePosition < 48000))
+            {
+                if (!player.getTimestamp(timestamp))
+                    return 0;
+                if (timestamp.framePosition < 0)
+                    timestamp.framePosition &= 0x00000000ffffffffL;
+                timelasttaken = currentTime;
+            }
+            if (timestamp.framePosition < 48000)
+                return 0;
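+            // extrapolate from the last timestamp using elapsed
+            // wall-clock time and the sample rate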
+            return (int) (timestamp.framePosition +
+                (currentTime - timestamp.nanoTime)  * samplerate / 1000000000L);
+        }
+        else
+            return 0;
+    }
+
+    // Get latency in milliseconds
+    // Only correct if there has been no pause
+    // and no buffer starvation
+    public int getLatency ()
+    {
+        if (player != null && firstwritetime != 0)
+        {
+            long currentTime = System.nanoTime();
+            // only run getTimestamp once every 1 second
+            if (currentTime - timelasttaken > 1000000000L)
+            {
+                if (!player.getTimestamp(timestamp))
+                    return -1;
+                if (timestamp.framePosition < 0)
+                    timestamp.framePosition &= 0x00000000ffffffffL;
+                timelasttaken = currentTime;
+            }
+            if (timestamp.framePosition < 1000)
+                return -1;
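+            // latency = wall-clock time since the first write minus
+            // the playback time represented by framePosition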
+            long playMillisec = timestamp.framePosition * 1000 / samplerate;
+            long elapsedMillisec = (timestamp.nanoTime - firstwritetime)/1000000L;
+            return (int) (elapsedMillisec - playMillisec);
+        }
+        else
+            return -1;
+    }
+
+    public void pause (boolean doPause)
+    {
+        if (player == null)
+            return;
+
+        if (doPause)
+        {
+            if (player.getPlayState() == AudioTrack.PLAYSTATE_PLAYING)
+                player.pause();
+        }
+        else
+        {
+            if (player.getPlayState() != AudioTrack.PLAYSTATE_PLAYING)
+                player.play();
+        }
+    }
+
+    public void release ()
+    {
+        if (player != null)
+            player.release();
+        player = null;
+    }
+
+}
diff --git a/mythtv/libs/libmyth/audio/audiooutput.cpp b/mythtv/libs/libmyth/audio/audiooutput.cpp
index 73f8259bda7..54f1edacea7 100644
--- a/mythtv/libs/libmyth/audio/audiooutput.cpp
+++ b/mythtv/libs/libmyth/audio/audiooutput.cpp
@@ -38,6 +38,7 @@ using namespace std;
 #endif
 #ifdef Q_OS_ANDROID
 #include "audiooutputopensles.h"
+#include "audiooutputaudiotrack.h"
 #endif
 #ifdef USING_OPENMAX
 #include "audiooutput_omx.h"
@@ -206,6 +207,16 @@ AudioOutput *AudioOutput::OpenAudio(AudioSettings &settings,
         LOG(VB_GENERAL, LOG_ERR, "Audio output device is set to a OpenSLES "
                                  "device but Android support is not compiled "
                                  "in!");
+#endif
+    }
+    else if (main_device.startsWith("AudioTrack:"))
+    {
+#ifdef Q_OS_ANDROID
+        ret = new AudioOutputAudioTrack(settings);
+#else
+        LOG(VB_GENERAL, LOG_ERR, "Audio output device is set to an AudioTrack "
+                                 "device but Android support is not compiled "
+                                 "in!");
 #endif
     }
     else if (main_device.startsWith("OpenMAX:"))
@@ -564,7 +575,17 @@ AudioOutput::ADCVect* AudioOutput::GetOutputList(void)
 #ifdef ANDROID
     {
         QString name = "OpenSLES:";
-        QString desc =  tr("OpenSLES default output.");
+        QString desc =  tr("OpenSLES default output. Stereo support only.");
+        adc = GetAudioDeviceConfig(name, desc);
+        if (adc)
+        {
+            list->append(*adc);
+            delete adc;
+        }
+    }
+    {
+        QString name = "AudioTrack:";
+        QString desc =  tr("Android AudioTrack output. Supports surround sound.");
         adc = GetAudioDeviceConfig(name, desc);
         if (adc)
         {
diff --git a/mythtv/libs/libmyth/audio/audiooutput_omx.cpp b/mythtv/libs/libmyth/audio/audiooutput_omx.cpp
index d79b786053d..fa60d255ca5 100644
--- a/mythtv/libs/libmyth/audio/audiooutput_omx.cpp
+++ b/mythtv/libs/libmyth/audio/audiooutput_omx.cpp
@@ -576,6 +576,13 @@ int AudioOutputOMX::GetBufferedOnSoundcard(void) const
     }
 
 #ifdef USING_BROADCOM
+    // output bits per 10 frames
+    int obpf;
+    if (m_passthru && !usesSpdif())
+        obpf = m_source_bitrate * 10 / m_source_samplerate;
+    else
+        obpf = m_output_bytes_per_frame * 80;
+
     OMX_PARAM_U32TYPE u;
     OMX_DATA_INIT(u);
     u.nPortIndex = m_audiorender.Base();
@@ -586,7 +593,7 @@ int AudioOutputOMX::GetBufferedOnSoundcard(void) const
             "GetConfig AudioRenderingLatency error %1").arg(Error2String(e)));
         return 0;
     }
-    return u.nU32 * m_output_bytes_per_frame;
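+    // u.nU32 is the buffered audio in frames; obpf / 80 is output
+    // bytes per frame, so the result is bytes on the soundcard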
+    return u.nU32 * obpf / 80;
 #else
     return m_pending;
 #endif
diff --git a/mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp
new file mode 100644
index 00000000000..95f533834aa
--- /dev/null
+++ b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.cpp
@@ -0,0 +1,276 @@
+
+#include "config.h"
+
+using namespace std;
+
+#include <QAndroidJniObject>
+#include <QAndroidJniEnvironment>
+
+#include "mythlogging.h"
+#include "audiooutputaudiotrack.h"
+
+#define CHANNELS_MIN 1
+#define CHANNELS_MAX 8
+
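+// Log and clear any pending Java exception, recording whether one
+// occurred in the local 'exception' flag.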
+#define ANDROID_EXCEPTION_CHECK \
+  if (env->ExceptionCheck()) { \
+    env->ExceptionDescribe(); \
+    env->ExceptionClear(); \
+    exception=true; \
+  } else \
+    exception=false;
+// clear exception without checking
+#define ANDROID_EXCEPTION_CLEAR \
+  if (env->ExceptionCheck()) { \
+    env->ExceptionDescribe(); \
+    env->ExceptionClear(); \
+  }
+
+#define LOC QString("AudioTrack: ")
+
+// Constants from Android Java API
+// class android.media.AudioFormat
+#define AF_CHANNEL_OUT_MONO 4
+#define AF_CHANNEL_OUT_STEREO 12
+#define AF_CHANNEL_OUT_SURROUND 1052
+#define AF_ENCODING_AC3 5
+#define AF_ENCODING_E_AC3 6
+#define AF_ENCODING_DTS 7
+#define AF_ENCODING_DOLBY_TRUEHD 14
+#define AF_ENCODING_PCM_8BIT 3
+#define AF_ENCODING_PCM_16BIT 2
+#define AF_ENCODING_PCM_FLOAT 4
+
+// for debugging
+#include <android/log.h>
+
+AudioOutputAudioTrack::AudioOutputAudioTrack(const AudioSettings &settings) :
+    AudioOutputBase(settings)
+{
+    InitSettings(settings);
+    if (settings.m_init)
+        Reconfigure(settings);
+}
+
+AudioOutputAudioTrack::~AudioOutputAudioTrack()
+{
+    KillAudio();
+    CloseDevice();
+}
+
+bool AudioOutputAudioTrack::OpenDevice()
+{
+    bool exception=false;
+    QAndroidJniEnvironment env;
+    jint encoding = 0;
+    jint sampleRate = m_samplerate;
+
+    // obpf = output bits per 10 frames
+    int obpf = m_output_bytes_per_frame * 80;
+
+    if (m_passthru || m_enc)
+        obpf = m_source_bitrate * 10 / m_source_samplerate;
+
+    // 50 milliseconds
+    m_fragment_size = obpf * m_source_samplerate * 5 / 8000;
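+    // e.g. 48 kHz 16-bit stereo: obpf = 4 bytes * 80 = 320, so
+    // m_fragment_size = 320 * 48000 * 5 / 8000 = 9600 bytes (50 ms)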
+
+    if (m_fragment_size < 1536)
+        m_fragment_size = 1536;
+
+    if (m_passthru || m_enc)
+    {
+        switch (m_codec)
+        {
+            case AV_CODEC_ID_AC3:
+                encoding = AF_ENCODING_AC3;
+                break;
+            case AV_CODEC_ID_DTS:
+                encoding = AF_ENCODING_DTS;
+                break;
+            case AV_CODEC_ID_EAC3:
+                encoding = AF_ENCODING_E_AC3;
+                break;
+            case AV_CODEC_ID_TRUEHD:
+                encoding = AF_ENCODING_DOLBY_TRUEHD;
+                break;
+
+            default:
+                LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" No support for audio passthru encoding %1").arg(m_codec));
+                return false;
+        }
+    }
+    else
+    {
+        switch (m_output_format)
+        {
+            case FORMAT_U8:
+                // This could be used to get the value from Java instead
+                // of having these constants in our header file:
+                // encoding = QAndroidJniObject::getStaticField<jint>
+                //   ("android.media.AudioFormat","ENCODING_PCM_8BIT");
+                encoding = AF_ENCODING_PCM_8BIT;
+                break;
+            case FORMAT_S16:
+                encoding = AF_ENCODING_PCM_16BIT;
+                break;
+            case FORMAT_FLT:
+                encoding = AF_ENCODING_PCM_FLOAT;
+                break;
+            default:
+                LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" No support for audio format %1").arg(m_output_format));
+                return false;
+        }
+    }
+
+    jint minBufferSize = m_fragment_size * 4;
+    m_soundcard_buffer_size = minBufferSize;
+    jint channels = m_channels;
+
+    m_bytesWritten = 0;
+    m_startTimeCode = 0;
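+    // "(IIII)V" must match the Java constructor:
+    // AudioOutputAudioTrack(int encoding, int sampleRate, int bufferSize, int channels)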
+    m_audioTrack = new QAndroidJniObject("org/mythtv/audio/AudioOutputAudioTrack",
+        "(IIII)V", encoding, sampleRate, minBufferSize, channels);
+    ANDROID_EXCEPTION_CHECK
+
+    if (exception)
+    {
+        LOG(VB_GENERAL, LOG_ERR, LOC + __func__ + QString(" Java Exception when creating AudioTrack"));
+        m_audioTrack = nullptr;
+        return false;
+    }
+    return true;
+}
+
+void AudioOutputAudioTrack::CloseDevice()
+{
+    QAndroidJniEnvironment env;
+    if (m_audioTrack)
+    {
+        m_audioTrack->callMethod<void>("release");
+        ANDROID_EXCEPTION_CLEAR
+        delete m_audioTrack;
+        m_audioTrack = nullptr;
+    }
+}
+
+AudioOutputSettings* AudioOutputAudioTrack::GetOutputSettings(bool /* digital */)
+{
+    bool exception=false;
+    QAndroidJniEnvironment env;
+    jint bufsize = 0;
+
+    AudioOutputSettings *settings = new AudioOutputSettings();
+
+    int supportedrate = 0;
+    while (int rate = settings->GetNextRate())
+    {
+        // Checking for valid rates using getMinBufferSize.
+        // See https://stackoverflow.com/questions/8043387/android-audiorecord-supported-sampling-rates/22317382
+        bufsize = QAndroidJniObject::callStaticMethod<jint>
+            ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
+             rate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_16BIT);
+        ANDROID_EXCEPTION_CHECK
+        if (bufsize > 0 && !exception)
+        {
+            settings->AddSupportedRate(rate);
+            // save any supported rate for later
+            supportedrate = rate;
+        }
+    }
+
+    // Checking for valid format using getMinBufferSize.
+    bufsize = QAndroidJniObject::callStaticMethod<jint>
+        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
+            supportedrate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_8BIT);
+    ANDROID_EXCEPTION_CHECK
+    if (bufsize > 0 && !exception)
+        settings->AddSupportedFormat(FORMAT_U8);
+    // 16bit always supported
+    settings->AddSupportedFormat(FORMAT_S16);
+
+    bufsize = QAndroidJniObject::callStaticMethod<jint>
+        ("android/media/AudioTrack", "getMinBufferSize", "(III)I",
+            supportedrate, AF_CHANNEL_OUT_MONO, AF_ENCODING_PCM_FLOAT);
+    ANDROID_EXCEPTION_CHECK
+    if (bufsize > 0 && !exception)
+        settings->AddSupportedFormat(FORMAT_FLT);
+
+    for (uint channels = CHANNELS_MIN; channels <= CHANNELS_MAX; channels++)
+    {
+        settings->AddSupportedChannels(channels);
+    }
+    settings->setPassthrough(0);
+
+    return settings;
+}
+
+void AudioOutputAudioTrack::WriteAudio(unsigned char* aubuf, int size)
+{
+    bool exception=false;
+    QAndroidJniEnvironment env;
+    if (m_actually_paused)
+    {
+        jboolean param = true;
+        m_audioTrack->callMethod<void>("pause","(Z)V",param);
+        ANDROID_EXCEPTION_CLEAR
+        return;
+    }
+    // create a java byte array
+    jbyteArray arr = env->NewByteArray(size);
+    env->SetByteArrayRegion(arr, 0, size, reinterpret_cast<jbyte*>(aubuf));
+    jint ret = -99;
+    if (m_audioTrack)
+    {
+        ret = m_audioTrack->callMethod<jint>("write","([BI)I", arr, size);
+        ANDROID_EXCEPTION_CHECK
+    }
+    env->DeleteLocalRef(arr);
+    if (ret != size || exception)
+        LOG(VB_GENERAL, LOG_ERR, LOC + __func__
+          + QString(" Audio Write failed, size %1 return %2 exception %3")
+          .arg(size).arg(ret).arg(exception));
+
+    if (ret > 0)
+        m_bytesWritten += ret;
+
+    LOG(VB_AUDIO | VB_TIMESTAMP, LOG_INFO, LOC + __func__
+        + QString(" WriteAudio size=%1 written=%2")
+        .arg(size).arg(ret));
+}
+
+
+int AudioOutputAudioTrack::GetBufferedOnSoundcard(void) const
+{
+    bool exception=false;
+    QAndroidJniEnvironment env;
+    int buffered = 0;
+    if (m_audioTrack)
+    {
+        buffered = m_audioTrack->callMethod<jint>("getBufferedBytes");
+        ANDROID_EXCEPTION_CHECK
+        if (exception)
+            buffered = 0;
+    }
+
+    return buffered;
+}
+
+bool AudioOutputAudioTrack::AddData(void *in_buffer, int in_len,
+                              int64_t timecode, int in_frames)
+{
+    bool ret = AudioOutputBase::AddData(in_buffer, in_len, timecode, in_frames);
+
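+    // record the timecode of the first queued audio as a reference point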
+    if (m_startTimeCode == 0)
+        m_startTimeCode = GetBaseAudBufTimeCode();
+
+    return ret;
+}
+
+void AudioOutputAudioTrack::Pause(bool paused)
+{
+    AudioOutputBase::Pause(paused);
+    if (m_audioTrack)
+    {
+        jboolean param = paused;
+        m_audioTrack->callMethod<void>("pause", "(Z)V", param);
+    }
+}
diff --git a/mythtv/libs/libmyth/audio/audiooutputaudiotrack.h b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.h
new file mode 100644
index 00000000000..2279ec6a3b1
--- /dev/null
+++ b/mythtv/libs/libmyth/audio/audiooutputaudiotrack.h
@@ -0,0 +1,42 @@
+#ifndef _AUDIOOUTPUTAUDIOTRACK_H_
+#define _AUDIOOUTPUTAUDIOTRACK_H_
+
+#include "audiooutputbase.h"
+
+class QAndroidJniObject;
+/*
+
+    Audio output for android based on android.media.AudioTrack.
+
+    This uses the java class org.mythtv.audio.AudioOutputAudioTrack
+    to invoke android media playback methods.
+
+*/
+
+class AudioOutputAudioTrack : public AudioOutputBase
+{
+  public:
+    explicit AudioOutputAudioTrack(const AudioSettings &settings);
+    ~AudioOutputAudioTrack() override;
+
+    bool AddData(void *buffer, int len, int64_t timecode, int frames) override; // AudioOutput
+
+    // Volume control
+    int GetVolumeChannel(int /* channel */) const override // VolumeBase
+        { return 100; }
+    void SetVolumeChannel(int /* channel */, int /* volume */) override // VolumeBase
+        {}
+    void Pause(bool paused) override; // AudioOutput
+
+  protected:
+    bool OpenDevice(void) override; // AudioOutputBase
+    void CloseDevice(void) override; // AudioOutputBase
+    void WriteAudio(unsigned char *aubuf, int size) override; // AudioOutputBase
+    int  GetBufferedOnSoundcard(void) const override; // AudioOutputBase
+    AudioOutputSettings* GetOutputSettings(bool digital) override; // AudioOutputBase
+    QAndroidJniObject *m_audioTrack {nullptr};
+    uint32_t m_bytesWritten {0};
+    int64_t m_startTimeCode {0};
+};
+
+#endif //_AUDIOOUTPUTAUDIOTRACK_H_
diff --git a/mythtv/libs/libmyth/audio/audiooutputbase.cpp b/mythtv/libs/libmyth/audio/audiooutputbase.cpp
index 1200bc2106c..1e07886c6b4 100644
--- a/mythtv/libs/libmyth/audio/audiooutputbase.cpp
+++ b/mythtv/libs/libmyth/audio/audiooutputbase.cpp
@@ -24,6 +24,11 @@ using namespace std;
 #include "mythlogging.h"
 #include "mythconfig.h"
 
+// AC3 encode currently disabled for Android
+#if defined(Q_OS_ANDROID)
+#define DISABLE_AC3_ENCODE
+#endif
+
 #define LOC QString("AOBase: ")
 
 #define WPOS (m_audiobuffer + org_waud)
@@ -61,6 +66,9 @@ AudioOutputBase::AudioOutputBase(const AudioSettings &settings) :
     memset(m_src_in_buf,         0, sizeof(m_src_in_buf));
     memset(m_audiobuffer,        0, sizeof(m_audiobuffer));
 
+    if (m_main_device.startsWith("OpenMAX:")
+        || m_main_device.startsWith("AudioTrack:"))
+        m_usesSpdif = false;
     // Handle override of SRC quality settings
     if (gCoreContext->GetBoolSetting("SRCQualityOverride", false))
     {
@@ -313,7 +321,7 @@ void AudioOutputBase::SetStretchFactorLocked(float lstretchfactor)
         m_pSoundStretch->setSampleRate(m_samplerate);
         m_pSoundStretch->setChannels(channels);
         m_pSoundStretch->setTempo(m_stretchfactor);
-#if ARCH_ARM
+#if ARCH_ARM || defined(Q_OS_ANDROID)
-        // use less demanding settings for Raspberry pi
+        // use less demanding settings for Raspberry Pi and Android
         m_pSoundStretch->setSetting(SETTING_SEQUENCE_MS, 82);
         m_pSoundStretch->setSetting(SETTING_USE_AA_FILTER, 0);
@@ -415,11 +423,11 @@ bool AudioOutputBase::SetupPassthrough(AVCodecID codec, int codec_profile,
 
     delete m_spdifenc;
 
-    // No spdif encoder if using openmax audio
-    if (m_main_device.startsWith("OpenMAX:"))
-        m_spdifenc = nullptr;
-    else
+    // No spdif encoder needed for certain devices
+    if (m_usesSpdif)
         m_spdifenc = new SPDIFEncoder("spdif", codec);
+    else
+        m_spdifenc = nullptr;
     if (m_spdifenc && m_spdifenc->Succeeded() && codec == AV_CODEC_ID_DTS)
     {
         switch(codec_profile)
@@ -476,10 +484,11 @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
             m_output_settings->IsSupportedChannels(lconfigured_channels);
 
         // check if the number of channels could be transmitted via AC3 encoding
+#ifndef DISABLE_AC3_ENCODE
         lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
             (!m_output_settings->canFeature(FEATURE_LPCM) &&
              lconfigured_channels > 2 && lconfigured_channels <= 6);
-
+#endif
         if (!lenc && !cando_channels)
         {
             // if hardware doesn't support source audio configuration
@@ -517,11 +526,11 @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
            and we have more than 2 channels but multichannel PCM is not
            supported or if the device just doesn't support the number of
            channels */
+#ifndef DISABLE_AC3_ENCODE
         lenc = m_output_settingsdigital->canFeature(FEATURE_AC3) &&
             ((!m_output_settings->canFeature(FEATURE_LPCM) &&
               lconfigured_channels > 2) ||
              !m_output_settings->IsSupportedChannels(lconfigured_channels));
-
         /* Might we reencode a bitstream that's been decoded for timestretch?
            If the device doesn't support the number of channels - see below */
         if (m_output_settingsdigital->canFeature(FEATURE_AC3) &&
@@ -530,7 +539,7 @@ void AudioOutputBase::Reconfigure(const AudioSettings &orig_settings)
         {
             lreenc = true;
         }
-
+#endif
         // Enough channels? Upmix if not, but only from mono/stereo/5.0 to 5.1
         if (IS_VALID_UPMIX_CHANNEL(settings.m_channels) &&
             settings.m_channels < lconfigured_channels)
@@ -968,7 +977,7 @@ void AudioOutputBase::SetEffDsp(int dsprate)
 /**
  * Get the number of bytes in the audiobuffer
  */
-inline int AudioOutputBase::audiolen()
+inline int AudioOutputBase::audiolen() const
 {
     if (m_waud >= m_raud)
         return m_waud - m_raud;
@@ -978,7 +987,7 @@ inline int AudioOutputBase::audiolen()
 /**
  * Get the free space in the audiobuffer in bytes
  */
-int AudioOutputBase::audiofree()
+int AudioOutputBase::audiofree() const
 {
     return kAudioRingBufferSize - audiolen() - 1;
     /* There is one wasted byte in the buffer. The case where waud = raud is
@@ -993,7 +1002,7 @@ int AudioOutputBase::audiofree()
  * This value can differ from that returned by audiolen if samples are
  * being converted to floats and the output sample format is not 32 bits
  */
-int AudioOutputBase::audioready()
+int AudioOutputBase::audioready() const
 {
     if (m_passthru || m_enc || m_bytes_per_frame == m_output_bytes_per_frame)
         return audiolen();
@@ -1008,7 +1017,20 @@ int64_t AudioOutputBase::GetAudiotime(void)
     if (m_audbuf_timecode == 0 || !m_configure_succeeded)
         return 0;
 
-    int obpf = m_output_bytes_per_frame;
+    // output bits per 10 frames
+    int64_t obpf;
+
+    if (m_passthru && !usesSpdif())
+        obpf = m_source_bitrate * 10 / m_source_samplerate;
+    else if (m_enc && !usesSpdif())
+    {
+        // re-encode bitrate is hardcoded at 448000
+        obpf = 448000 * 10 / m_source_samplerate;
+    }
+    else
+        obpf = m_output_bytes_per_frame * 80;
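+    // obpf is scaled to bits per 10 frames so the integer arithmetic stays
+    // exact for compressed streams whose frame size is fractional in bytes;
+    // the compensating factor of 80 appears in the m_audiotime expression below.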
+
     int64_t oldaudiotime;
 
     /* We want to calculate 'audiotime', which is the timestamp of the audio
@@ -1029,11 +1051,11 @@ int64_t AudioOutputBase::GetAudiotime(void)
 
     QMutexLocker lockav(&m_avsync_lock);
 
-    int soundcard_buffer = GetBufferedOnSoundcard(); // bytes
+    int64_t soundcard_buffer = GetBufferedOnSoundcard(); // bytes
 
     /* audioready tells us how many bytes are in audiobuffer
        scaled appropriately if output format != internal format */
-    int main_buffer = audioready();
+    int64_t main_buffer = audioready();
 
     oldaudiotime = m_audiotime;
 
@@ -1041,9 +1063,10 @@ int64_t AudioOutputBase::GetAudiotime(void)
        of major post-stretched buffer contents
        processing latencies are catered for in AddData/SetAudiotime
        to eliminate race */
-    m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ? (
-        ((int64_t)(main_buffer + soundcard_buffer) * m_eff_stretchfactor) /
-        (m_effdsp * obpf)) : 0);
+
+    m_audiotime = m_audbuf_timecode - (m_effdsp && obpf ?
+        ((main_buffer + soundcard_buffer) * int64_t(m_eff_stretchfactor)
+        * 80 / int64_t(m_effdsp) / obpf) : 0);
 
     /* audiotime should never go backwards, but we might get a negative
        value if GetBufferedOnSoundcard() isn't updated by the driver very
diff --git a/mythtv/libs/libmyth/audio/audiooutputbase.h b/mythtv/libs/libmyth/audio/audiooutputbase.h
index ea830b43585..b2a44e80403 100644
--- a/mythtv/libs/libmyth/audio/audiooutputbase.h
+++ b/mythtv/libs/libmyth/audio/audiooutputbase.h
@@ -155,15 +155,17 @@ class AudioOutputBase : public AudioOutput, public MThread
 
     int CheckFreeSpace(int &frames);
 
-    inline int audiolen(); // number of valid bytes in audio buffer
-    int audiofree();       // number of free bytes in audio buffer
-    int audioready();      // number of bytes ready to be written
+    inline int audiolen() const; // number of valid bytes in audio buffer
+    int audiofree() const;       // number of free bytes in audio buffer
+    int audioready() const;      // number of bytes ready to be written
 
     void SetStretchFactorLocked(float factor);
 
     // For audiooutputca
     int GetBaseAudBufTimeCode() const { return m_audbuf_timecode; }
 
+    bool usesSpdif() const { return m_usesSpdif; }
+
   protected:
     // Basic details about the audio stream
     int               m_channels                   {-1};
@@ -295,6 +297,7 @@ class AudioOutputBase : public AudioOutput, public MThread
     int64_t           m_length_last_data                  {0};
 
     // SPDIF Encoder for digital passthrough
+    bool              m_usesSpdif                         {true};
     SPDIFEncoder     *m_spdifenc                          {nullptr};
 
     // Flag indicating if SetStretchFactor enabled audio float processing
diff --git a/mythtv/libs/libmyth/libmyth.pro b/mythtv/libs/libmyth/libmyth.pro
index bb5f4a9e200..3c2de90068e 100644
--- a/mythtv/libs/libmyth/libmyth.pro
+++ b/mythtv/libs/libmyth/libmyth.pro
@@ -179,7 +179,9 @@ unix:!cygwin {
 
 android {
 SOURCES += audio/audiooutputopensles.cpp
+SOURCES += audio/audiooutputaudiotrack.cpp
 HEADERS += audio/audiooutputopensles.h
+HEADERS += audio/audiooutputaudiotrack.h
 }
 
 linux:DEFINES += linux
