commit 3b0dd2efb281b59143405d23ccd32cde42fa78c2
Author: Mark Spieth <mspieth@digivation.com.au>
Date:   Tue Apr 27 07:51:51 2010 +1000

    smoother vsync with predictive frame skipping

diff --git a/mythtv/libs/libmyth/audiooutput.h b/mythtv/libs/libmyth/audiooutput.h
index 2b22a1e..0b28cfb 100644
--- a/mythtv/libs/libmyth/audiooutput.h
+++ b/mythtv/libs/libmyth/audiooutput.h
@@ -44,20 +44,20 @@ class MPUBLIC AudioOutput : public VolumeBase, public OutputListeners
 
     // timecode is in milliseconds.
     // Return true if all samples were written, false if none.
-    virtual bool AddSamples(char *buffer, int samples, long long timecode) = 0;
-    virtual bool AddSamples(char *buffers[], int samples, long long timecode) = 0;
+    virtual bool AddSamples(char *buffer, int samples, int64_t timecode) = 0;
+    virtual bool AddSamples(char *buffers[], int samples, int64_t timecode) = 0;
 
-    virtual void SetTimecode(long long timecode) = 0;
+    virtual void SetTimecode(int64_t timecode) = 0;
     virtual bool IsPaused(void) const = 0;
     virtual void Pause(bool paused) = 0;
  
     // Wait for all data to finish playing
     virtual void Drain(void) = 0;
 
-    virtual int GetAudiotime(void) = 0;
+    virtual int64_t GetAudiotime(void) = 0;
 
     /// report amount of audio buffered in milliseconds.
-    virtual int GetAudioBufferedTime(void) { return 0; }
+    virtual int64_t GetAudioBufferedTime(void) { return 0; }
 
     virtual void SetSourceBitrate(int ) { }
 
diff --git a/mythtv/libs/libmyth/audiooutputbase.cpp b/mythtv/libs/libmyth/audiooutputbase.cpp
index 0b9f2ef..8ea2b24 100644
--- a/mythtv/libs/libmyth/audiooutputbase.cpp
+++ b/mythtv/libs/libmyth/audiooutputbase.cpp
@@ -19,6 +19,10 @@
 #define LOC QString("AO: ")
 #define LOC_ERR QString("AO, ERROR: ")
 
+#define EFF_FACTOR_F  100000.0
+#define EFF_FACTOR_I  100000
+#define EFF_FACTOR_LL 100000LL
+
 AudioOutputBase::AudioOutputBase(const AudioSettings &settings) :
     // protected
     effdsp(0),                  effdspstretched(0),
@@ -31,6 +35,7 @@ AudioOutputBase::AudioOutputBase(const AudioSettings &settings) :
     audio_passthru_device(settings.GetPassthruDevice()),
     audio_passthru(false),      audio_enc(false),
     audio_reenc(false),         audio_stretchfactor(1.0f),
+    eff_audio_stretchfactor(10000), // FIXME(review): SetStretchFactorLocked scales by EFF_FACTOR_I (100000); should this initializer be 100000? verify
 
     source(settings.source),    killaudio(false),
 
@@ -143,6 +148,7 @@ void AudioOutputBase::SetStretchFactorLocked(float laudio_stretchfactor)
     if ((audio_stretchfactor != laudio_stretchfactor) ||  !pSoundStretch)
     {
         audio_stretchfactor = laudio_stretchfactor;
+        eff_audio_stretchfactor = (int)(EFF_FACTOR_F * laudio_stretchfactor);
         if (pSoundStretch)
         {
             VERBOSE(VB_GENERAL, LOC + QString("Changing time stretch to %1")
@@ -528,11 +534,11 @@ void AudioOutputBase::Reset()
     gettimeofday(&audiotime_updated, NULL);
 }
 
-void AudioOutputBase::SetTimecode(long long timecode)
+void AudioOutputBase::SetTimecode(int64_t timecode)
 {
     QMutexLocker locker(&audio_buflock);
     audbuf_timecode = timecode;
-    samples_buffered = (long long)((timecode * effdsp) / 100000.0);
+    samples_buffered = (int64_t)((timecode * effdsp) / EFF_FACTOR_I);
 }
 
 void AudioOutputBase::SetEffDsp(int dsprate)
@@ -580,7 +586,7 @@ int AudioOutputBase::audiofree(bool use_lock)
        be is kAudioRingBufferSize - 1. */
 }
 
-int AudioOutputBase::GetAudiotime(void)
+int64_t AudioOutputBase::GetAudiotime(void)
 {
     /* Returns the current timecode of audio leaving the soundcard, based
        on the 'audiotime' computed earlier, and the delay since it was computed.
@@ -590,7 +596,7 @@ int AudioOutputBase::GetAudiotime(void)
        The reason is that computing 'audiotime' requires acquiring the audio
        lock, which the video thread should not do. So, we call 'SetAudioTime()'
        from the audio thread, and then call this from the video thread. */
-    long long ret;
+    int64_t ret;
     struct timeval now;
 
     if (audiotime == 0)
@@ -602,7 +608,7 @@ int AudioOutputBase::GetAudiotime(void)
 
     ret = (now.tv_sec - audiotime_updated.tv_sec) * 1000;
     ret += (now.tv_usec - audiotime_updated.tv_usec) / 1000;
-    ret = (long long)(ret * audio_stretchfactor);
+    ret = (int64_t)(ret * audio_stretchfactor);
 
 #if 1
     VERBOSE(VB_AUDIO+VB_TIMESTAMP,
@@ -617,7 +623,7 @@ int AudioOutputBase::GetAudiotime(void)
 
     ret += audiotime;
 
-    return (int)ret;
+    return ret;
 }
 
 void AudioOutputBase::SetAudiotime(void)
@@ -625,8 +631,9 @@ void AudioOutputBase::SetAudiotime(void)
     if (audbuf_timecode == 0)
         return;
 
-    int soundcard_buffer = 0;
-    int totalbuffer;
+    int64_t soundcard_buffer = 0;
+    int64_t totalsamples_stretched;
+    int64_t totalsamples_unstretched = 0;
 
     /* We want to calculate 'audiotime', which is the timestamp of the audio
        which is leaving the sound card at this instant.
@@ -649,31 +656,35 @@ void AudioOutputBase::SetAudiotime(void)
     QMutexLocker lock2(&avsync_lock);
 
     soundcard_buffer = GetBufferedOnSoundcard(); // bytes
-    totalbuffer = audiolen(false) + soundcard_buffer;
-
-    // include algorithmic latencies
-    if (pSoundStretch)
-        totalbuffer += (int)((pSoundStretch->numUnprocessedSamples() *
-                              audio_bytes_per_sample) / audio_stretchfactor);
+    // major post-stretched buffer contents
+    totalsamples_stretched = (audiolen(false) + soundcard_buffer) / audio_bytes_per_sample;
 
+    // include algorithmic pre-stretch latencies
     if (upmixer && needs_upmix)
-        totalbuffer += upmixer->sampleLatency() * audio_bytes_per_sample;
+        totalsamples_unstretched += upmixer->sampleLatency();
 
+    if (pSoundStretch)
+        totalsamples_unstretched += pSoundStretch->numUnprocessedSamples();
+
+    // include algorithmic post-stretch latencies
     if (encoder) 
-         totalbuffer += encoder->Buffered();
+        // the input buffered data is still in audio_bytes_per_sample format
+        totalsamples_stretched += encoder->Buffered() / audio_bytes_per_sample;
 
-    audiotime = audbuf_timecode - (int)(totalbuffer * 100000.0 /
-                                   (audio_bytes_per_sample * effdspstretched));
+    // timecode is the stretch adjusted version
+    audiotime = audbuf_timecode - (int64_t)((totalsamples_unstretched * EFF_FACTOR_I + 
+                totalsamples_stretched * eff_audio_stretchfactor ) / effdsp );
 
     gettimeofday(&audiotime_updated, NULL);
 #if 1
     VERBOSE(VB_AUDIO+VB_TIMESTAMP,
             QString("SetAudiotime set=%1.%2, audt=%3 atc=%4 "
-                    "tb=%5 sb=%6 eds=%7 abps=%8 sf=%9")
+                    "tss=%5 tsu=%6 sb=%7 eds=%8 abps=%9 sf=%10")
             .arg(audiotime_updated.tv_sec).arg(audiotime_updated.tv_usec)
             .arg(audiotime)
             .arg(audbuf_timecode)
-            .arg(totalbuffer)
+            .arg(totalsamples_stretched)
+            .arg(totalsamples_unstretched)
             .arg(soundcard_buffer)
             .arg(effdspstretched)
             .arg(audio_bytes_per_sample)
@@ -681,7 +692,7 @@ void AudioOutputBase::SetAudiotime(void)
 #endif
 }
 
-int AudioOutputBase::GetAudioBufferedTime(void)
+int64_t AudioOutputBase::GetAudioBufferedTime(void)
 {
      return audbuf_timecode - GetAudiotime();
 }
@@ -744,7 +755,7 @@ void AudioOutputBase::_AdjustVolume(AudioDataType *buffer, int len, bool music)
 }
 
 bool AudioOutputBase::AddSamples(char *buffers[], int samples,
-                                 long long timecode)
+                                 int64_t timecode)
 {
     // NOTE: This function is not threadsafe
     int afree = audiofree(true);
@@ -823,7 +834,7 @@ bool AudioOutputBase::AddSamples(char *buffers[], int samples,
     return true;
 }
 
-bool AudioOutputBase::AddSamples(char *buffer, int samples, long long timecode)
+bool AudioOutputBase::AddSamples(char *buffer, int samples, int64_t timecode)
 {
     // NOTE: This function is not threadsafe
 
@@ -965,12 +976,13 @@ void *AudioOutputBase::_MonoToStereo(AudioDataType *s1, AudioDataType *s2, int s
     return s2;
 }
 
-void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int samples,
-                                  long long timecode)
+void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int in_samples,
+                                  int64_t timecode)
 {
     int len; // = samples * audio_bytes_per_sample;
     int audio_bytes = audio_bits / 8;
     int org_waud = waud;
+    int samples = in_samples;
 
     int afree = audiofree(false);
 
@@ -1009,7 +1021,7 @@ void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int samples,
         int out_samples = 0;
         org_waud = waud;
         int step = (interleaved)?source_audio_channels:1;
-	
+
         for (int itemp = 0; itemp < samples; )
         {
             if (audio_bytes == 2)
@@ -1094,113 +1106,114 @@ void AudioOutputBase::_AddSamples(void *buffer, bool interleaved, int samples,
         }
     }
 
-    if (samples <= 0)
-        return;
-        
-    if (pSoundStretch)
+    if (samples > 0)
     {
-        // does not change the timecode, only the number of samples
-        // back to orig pos
-        org_waud = waud;
-        int bdiff = kAudioRingBufferSize - org_waud;
-        int nSamplesToEnd = bdiff/abps;
-        if (bdiff < len)
-        {
-            pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)
-                                      (audiobuffer +
-                                       org_waud), nSamplesToEnd);
-            pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)audiobuffer,
-                                      (len - bdiff) / abps);
-        }
-        else
-        {
-            pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)
-                                      (audiobuffer + org_waud),
-                                      len / abps);
-        }
 
-        int nSamples = pSoundStretch->numSamples();
-        len = WaitForFreeSpace(nSamples); 
-        
-        while ((nSamples = pSoundStretch->numSamples())) 
+        if (pSoundStretch)
         {
-            if (nSamples > nSamplesToEnd) 
-                nSamples = nSamplesToEnd;
-            
-            nSamples = pSoundStretch->receiveSamples(
-                (soundtouch::SAMPLETYPE*)
-                (audiobuffer + org_waud), nSamples
-            );
-            
-            if (nSamples == nSamplesToEnd) {
-                org_waud = 0;
-                nSamplesToEnd = kAudioRingBufferSize/abps;
+            // does not change the timecode, only the number of samples
+            // back to orig pos
+            org_waud = waud;
+            int bdiff = kAudioRingBufferSize - org_waud;
+            int nSamplesToEnd = bdiff/abps;
+            if (bdiff < len)
+            {
+                pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)
+                        (audiobuffer +
+                         org_waud), nSamplesToEnd);
+                pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)audiobuffer,
+                        (len - bdiff) / abps);
             }
-            else {
-                org_waud += nSamples * abps;
-                nSamplesToEnd -= nSamples;
+            else
+            {
+                pSoundStretch->putSamples((soundtouch::SAMPLETYPE*)
+                        (audiobuffer + org_waud),
+                        len / abps);
             }
-        }
-    }
 
-    if (internal_vol && SWVolume())
-    {
-        int bdiff = kAudioRingBufferSize - waud;
-        bool music = (timecode < 1);
+            int nSamples = pSoundStretch->numSamples();
+            len = WaitForFreeSpace(nSamples); 
 
-        if (bdiff < len)
-        {
-            AdjustVolume(audiobuffer + waud, bdiff, music);
-            AdjustVolume(audiobuffer, len - bdiff, music);
-        }
-        else
-            AdjustVolume(audiobuffer + waud, len, music);
-    }
+            while ((nSamples = pSoundStretch->numSamples())) 
+            {
+                if (nSamples > nSamplesToEnd) 
+                    nSamples = nSamplesToEnd;
 
-    // Encode to AC-3? 
-    if (encoder) 
-    {
-        org_waud = waud;
-        int bdiff = kAudioRingBufferSize - org_waud;
-        int to_get = 0;
+                nSamples = pSoundStretch->receiveSamples(
+                        (soundtouch::SAMPLETYPE*)
+                        (audiobuffer + org_waud), nSamples
+                        );
 
-        if (bdiff < len) 
+                if (nSamples == nSamplesToEnd) {
+                    org_waud = 0;
+                    nSamplesToEnd = kAudioRingBufferSize/abps;
+                }
+                else {
+                    org_waud += nSamples * abps;
+                    nSamplesToEnd -= nSamples;
+                }
+            }
+        }
+
+        if (internal_vol && SWVolume())
         {
-            encoder->Encode(audiobuffer + org_waud, bdiff);
-            to_get = encoder->Encode(audiobuffer, len - bdiff);
+            int bdiff = kAudioRingBufferSize - waud;
+            bool music = (timecode < 1);
+
+            if (bdiff < len)
+            {
+                AdjustVolume(audiobuffer + waud, bdiff, music);
+                AdjustVolume(audiobuffer, len - bdiff, music);
+            }
+            else
+                AdjustVolume(audiobuffer + waud, len, music);
         }
-        else 
-            to_get = encoder->Encode(audiobuffer + org_waud, len);
 
-        if (to_get > 0) 
+        // Encode to AC-3? 
+        if (encoder) 
         {
-            if (to_get >= bdiff)
+            org_waud = waud;
+            int bdiff = kAudioRingBufferSize - org_waud;
+            int to_get = 0;
+
+            if (bdiff < len) 
             {
-                encoder->GetFrames(audiobuffer + org_waud, bdiff);
-                to_get -= bdiff;
-                org_waud = 0;
+                encoder->Encode(audiobuffer + org_waud, bdiff);
+                to_get = encoder->Encode(audiobuffer, len - bdiff);
             }
-            if (to_get > 0)
-                encoder->GetFrames(audiobuffer + org_waud, to_get);
+            else 
+                to_get = encoder->Encode(audiobuffer + org_waud, len);
+
+            if (to_get > 0) 
+            {
+                if (to_get >= bdiff)
+                {
+                    encoder->GetFrames(audiobuffer + org_waud, bdiff);
+                    to_get -= bdiff;
+                    org_waud = 0;
+                }
+                if (to_get > 0)
+                    encoder->GetFrames(audiobuffer + org_waud, to_get);
 
-            org_waud += to_get;
+                org_waud += to_get;
+            }
         }
-    }
 
-    waud = org_waud;
-    lastaudiolen = audiolen(false);
+        waud = org_waud;
+        lastaudiolen = audiolen(false);
+    }
 
     if (timecode < 0)
         // mythmusic doesn't give timestamps..
-        timecode = (int)((samples_buffered * 100000.0) / effdsp);
+        timecode = (int64_t)((samples_buffered * EFF_FACTOR_I) / effdsp);
 
-    samples_buffered += samples;
+    samples_buffered += in_samples;
 
     /* we want the time at the end -- but the file format stores
        time at the start of the chunk. */
     // even with timestretch, timecode is still calculated from original
     // sample count
-    audbuf_timecode = timecode + (int)((samples * 100000.0) / effdsp);
+    audbuf_timecode = timecode + (int64_t)((in_samples * EFF_FACTOR_I) / effdsp);
 }
 
 void AudioOutputBase::Status()
@@ -1237,6 +1250,12 @@ void AudioOutputBase::OutputAudioLoop(void)
     unsigned char *zeros    = new unsigned char[fragment_size];
     unsigned char *fragment = new unsigned char[fragment_size];
 
+    // to reduce startup latency, write silence in 8ms chunks
+    int zero_fragment_size = (int)(0.008*audio_samplerate/audio_channels);
+    zero_fragment_size *= audio_channels * audio_bits / 16;   // make sure it's a multiple of audio_channels
+    if (zero_fragment_size > fragment_size)
+        zero_fragment_size = fragment_size;
+
     bzero(zeros, fragment_size);
     last_space_on_soundcard = 0;
 
@@ -1269,11 +1288,11 @@ void AudioOutputBase::OutputAudioLoop(void)
 
             // only send zeros if card doesn't already have at least one
             // fragment of zeros -dag
-            if (fragment_size >= soundcard_buffer_size - space_on_soundcard)
+            if (zero_fragment_size >= soundcard_buffer_size - space_on_soundcard)
             {
-                if (fragment_size <= space_on_soundcard) 
+                if (zero_fragment_size <= space_on_soundcard) 
                 {
-                    WriteAudio(zeros, fragment_size);
+                    WriteAudio(zeros, zero_fragment_size);
                 }
                 else 
                 {
@@ -1281,7 +1300,7 @@ void AudioOutputBase::OutputAudioLoop(void)
                     VERBOSE(VB_AUDIO+VB_TIMESTAMP, LOC +
                             QString("waiting for space on soundcard "
                                     "to write zeros: have %1 need %2")
-                            .arg(space_on_soundcard).arg(fragment_size));
+                            .arg(space_on_soundcard).arg(zero_fragment_size));
                     usleep(5000);
                 }
             }
diff --git a/mythtv/libs/libmyth/audiooutputbase.h b/mythtv/libs/libmyth/audiooutputbase.h
index b962a60..43e18cc 100644
--- a/mythtv/libs/libmyth/audiooutputbase.h
+++ b/mythtv/libs/libmyth/audiooutputbase.h
@@ -52,18 +52,18 @@ class AudioOutputBase : public AudioOutput, public QThread
     int GetSWVolume(void);
 
     // timecode is in milliseconds.
-    virtual bool AddSamples(char *buffer, int samples, long long timecode);
-    virtual bool AddSamples(char *buffers[], int samples, long long timecode);
+    virtual bool AddSamples(char *buffer, int samples, int64_t timecode);
+    virtual bool AddSamples(char *buffers[], int samples, int64_t timecode);
 
-    virtual void SetTimecode(long long timecode);
+    virtual void SetTimecode(int64_t timecode);
     virtual bool IsPaused(void) const { return audio_actually_paused; }
     virtual void Pause(bool paused);
 
     // Wait for all data to finish playing
     virtual void Drain(void);
 
-    virtual int GetAudiotime(void);
-    virtual int GetAudioBufferedTime(void);
+    virtual int64_t GetAudiotime(void);
+    virtual int64_t GetAudioBufferedTime(void);
 
     // Send output events showing current progress
     virtual void Status(void);
@@ -101,7 +101,7 @@ class AudioOutputBase : public AudioOutput, public QThread
 
     int GetAudioData(unsigned char *buffer, int buf_size, bool fill_buffer);
 
-    void _AddSamples(void *buffer, bool interleaved, int samples, long long timecode);
+    void _AddSamples(void *buffer, bool interleaved, int samples, int64_t timecode);
 
     void OutputAudioLoop(void);
 
@@ -118,10 +118,10 @@ class AudioOutputBase : public AudioOutput, public QThread
 
     void SetStretchFactorLocked(float factor);
 
-    int GetBaseAudioTime()                    const { return audiotime;       }
-    int GetBaseAudBufTimeCode()               const { return audbuf_timecode; }
+    int64_t GetBaseAudioTime()                const { return audiotime;       }
+    int64_t GetBaseAudBufTimeCode()           const { return audbuf_timecode; }
     soundtouch::SoundTouch *GetSoundStretch() const { return pSoundStretch;   }
-    void SetBaseAudioTime(const int inAudioTime) { audiotime = inAudioTime; }
+    void SetBaseAudioTime(const int64_t inAudioTime) { audiotime = inAudioTime; }
 
   protected:
     int effdsp; // from the recorded stream
@@ -144,6 +144,7 @@ class AudioOutputBase : public AudioOutput, public QThread
     bool audio_reenc;
 
     float audio_stretchfactor;
+    int  eff_audio_stretchfactor;     // audio_stretchfactor scaled by 100000 (EFF_FACTOR_I) — NOTE(review): ctor initializes to 10000, confirm intended scale
     AudioOutputSource source;
 
     bool killaudio;
@@ -188,7 +189,7 @@ class AudioOutputBase : public AudioOutput, public QThread
     bool timed_blocking; // do AddSamples calls block?
 
     int lastaudiolen;
-    long long samples_buffered;
+    int64_t samples_buffered;
 
     bool audio_thread_exists;
 
@@ -205,13 +206,13 @@ class AudioOutputBase : public AudioOutput, public QThread
     QMutex avsync_lock;
 
     /// timecode of audio leaving the soundcard (same units as timecodes)
-    long long audiotime;
+    int64_t audiotime;
     struct timeval audiotime_updated; // ... which was last updated at this time
 
     /* Audio circular buffer */
     int raud, waud;     /* read and write positions */
     /// timecode of audio most recently placed into buffer
-    long long audbuf_timecode;
+    int64_t audbuf_timecode;
 
     int numlowbuffer;
 
diff --git a/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp b/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp
index 0da4da0..0d53930 100644
--- a/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp
+++ b/mythtv/libs/libmythtv/NuppelVideoPlayer.cpp
@@ -206,7 +206,9 @@ NuppelVideoPlayer::NuppelVideoPlayer(bool muted)
       videosync(NULL),              delay(0),
       vsynctol(30/4),               avsync_delay(0),
       avsync_adjustment(0),         avsync_avg(0),
-      avsync_oldavg(0),             refreshrate(0),
+      avsync_oldavg(0),             
+      avsync_predictor(0),          avsync_predictor_enabled(false),
+      refreshrate(0),
       lastsync(false),              m_playing_slower(false),
       m_stored_audio_stretchfactor(1.0),
       audio_paused(false),
@@ -238,6 +240,7 @@ NuppelVideoPlayer::NuppelVideoPlayer(bool muted)
     db_prefer708     = gContext->GetNumSetting("Prefer708Captions", 1);
     autocommercialskip = (CommSkipMode)
         gContext->GetNumSetting("AutoCommercialSkip", kCommSkipOff);
+    usesmoothsync    = gContext->GetNumSetting("UseSmoothSync", 1) != 0;
 
     lastIgnoredManualSkip = QDateTime::currentDateTime().addSecs(-10);
 
@@ -1120,7 +1123,7 @@ void NuppelVideoPlayer::SetVideoParams(int width, int height, double fps,
         video_frame_rate = fps;
         float temp_speed = (play_speed == 0.0f) ?
             audio_stretchfactor : play_speed;
-        frame_interval = (int)(1000000.0f / video_frame_rate / temp_speed);
+        SetFrameInterval(kScan_Progressive, 1.0 / (video_frame_rate * temp_speed));
     }
 
     if (videoOutput)
@@ -2312,6 +2315,33 @@ float NuppelVideoPlayer::WarpFactor(void)
     return divergence;
 }
 
+void NuppelVideoPlayer::SetFrameInterval(FrameScanType scan, double frame_period)
+{
+    frame_interval = (int)(1000000.0f * frame_period + 0.5f);
+    avsync_predictor = 0;
+    avsync_predictor_enabled = false;
+
+    VERBOSE(VB_PLAYBACK, LOC + QString("SetFrameInterval ps:%1 scan:%2 usesmoothsync:%3")
+            .arg(play_speed).arg(scan).arg(usesmoothsync)
+           );
+    //if (play_speed <= 1 || play_speed > 2 || scan != kScan_Progressive || !usesmoothsync)
+    if (play_speed < 1 || play_speed > 2 || refreshrate <= 0 || !usesmoothsync)
+        return;
+
+    avsync_predictor_enabled = ((frame_interval-(frame_interval/200)) < refreshrate);
+}
+
+void NuppelVideoPlayer::ResetAVSync(void)
+{
+    avsync_avg = 0;
+    avsync_oldavg = 0;
+    avsync_predictor = 0;
+    prevtc = 0;
+    warpfactor = 1.0f;
+    warpfactor_avg = 1.0f;
+    VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V sync reset");
+}
+
 void NuppelVideoPlayer::InitAVSync(void)
 {
     videosync->Start();
@@ -2333,16 +2363,33 @@ void NuppelVideoPlayer::InitAVSync(void)
         VERBOSE(VB_GENERAL, msg);
         msg = QString("Refresh rate: %1, frame interval: %2")
                        .arg(refreshrate).arg(frame_interval);
-        VERBOSE(VB_PLAYBACK, msg);
+        VERBOSE(VB_PLAYBACK, LOC + msg);
+
+        SetFrameInterval(m_scan, 1.0 / (video_frame_rate * play_speed));
 
         // try to get preferential scheduling, but ignore if we fail to.
         myth_nice(-19);
     }
 }
 
+int64_t NuppelVideoPlayer::AVSyncGetAudiotime(void)
+{
+    int64_t currentaudiotime = 0;
+    audio_lock.lock();
+    if (audioOutput && normal_speed)
+    {
+        currentaudiotime = audioOutput->GetAudiotime();
+    }
+    audio_lock.unlock();
+    return currentaudiotime;
+}
+
 void NuppelVideoPlayer::AVSync(void)
 {
     float diverge = 0.0f;
+    int vsync_delay_clock = 0;
+    int64_t currentaudiotime = 0;
+
     // attempt to reduce fps for standalone PIP
     if (player_ctx->IsPIP() && framesPlayed % 2)
     {
@@ -2382,16 +2429,38 @@ void NuppelVideoPlayer::AVSync(void)
         ps = kScan_Progressive;
 
     bool dropframe = false;
+    QString dbg;
+
+    if (avsync_predictor_enabled)
+    {
+        avsync_predictor += frame_interval;
+        if (avsync_predictor >= refreshrate)
+        {
+            int refreshperiodsinframe = avsync_predictor/refreshrate;
+            avsync_predictor -= refreshrate * refreshperiodsinframe;
+        }
+        else
+        {
+            dropframe = true;
+            dbg = "A/V predict drop frame, ";
+        }
+    }
+
     if (diverge < -MAXDIVERGE)
     {
         dropframe = true;
         // If video is way behind of audio, adjust for it...
-        QString dbg = QString("Video is %1 frames behind audio (too slow), ")
+        dbg = QString("Video is %1 frames behind audio (too slow), ")
             .arg(-diverge);
+    }
 
+    if (dropframe)
+    {
         // Reset A/V Sync
         lastsync = true;
 
+        currentaudiotime = AVSyncGetAudiotime();
+
         if (buffer && !using_null_videoout &&
             videoOutput->hasHWAcceleration() &&
            !videoOutput->IsSyncLocked())
@@ -2416,16 +2485,17 @@ void NuppelVideoPlayer::AVSync(void)
         if (buffer)
             videoOutput->PrepareFrame(buffer, ps);
 
-        VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, QString("AVSync waitforframe %1 %2")
+        VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, LOC + QString("AVSync waitforframe %1 %2")
                 .arg(avsync_adjustment).arg(m_double_framerate));
-        videosync->WaitForFrame(avsync_adjustment + repeat_delay);
-        VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, "AVSync show");
+        vsync_delay_clock = videosync->WaitForFrame(avsync_adjustment + repeat_delay);
+        currentaudiotime = AVSyncGetAudiotime();
+        VERBOSE(VB_PLAYBACK|VB_TIMESTAMP, LOC + "AVSync show");
         if (!resetvideo)
             videoOutput->Show(ps);
 
         if (videoOutput->IsErrored())
         {
-            VERBOSE(VB_IMPORTANT, "NVP: Error condition detected "
+            VERBOSE(VB_IMPORTANT, LOC + "Error condition detected "
                     "in videoOutput after Show(), aborting playback.");
             SetErrored(QObject::tr("Serious error detected in Video Output"));
             return;
@@ -2461,9 +2531,9 @@ void NuppelVideoPlayer::AVSync(void)
             // Display the second field
             videosync->AdvanceTrigger();
 #ifdef NEW_AVSYNC
-            videosync->WaitForFrame(avsync_adjustment);
+            vsync_delay_clock = videosync->WaitForFrame(avsync_adjustment);
 #else
-            videosync->WaitForFrame(0);
+            vsync_delay_clock = videosync->WaitForFrame(0);
 #endif
             if (!resetvideo)
             {
@@ -2474,17 +2544,18 @@ void NuppelVideoPlayer::AVSync(void)
         repeat_delay = frame_interval * buffer->repeat_pict * 0.5;
 
         if (repeat_delay)
-            VERBOSE(VB_TIMESTAMP, QString("A/V repeat_pict, adding %1 repeat "
+            VERBOSE(VB_TIMESTAMP, LOC + QString("A/V repeat_pict, adding %1 repeat "
                     "delay").arg(repeat_delay));
     }
     else
     {
-        videosync->WaitForFrame(0);
+        vsync_delay_clock = videosync->WaitForFrame(0);
+        currentaudiotime = AVSyncGetAudiotime();
     }
 
     if (output_jmeter && output_jmeter->RecordCycleTime())
     {
-        VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString("A/V avsync_delay: %1, "
+        VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V avsync_delay: %1, "
                 "avsync_avg: %2, warpfactor: %3, warpfactor_avg: %4")
                 .arg(avsync_delay / 1000).arg(avsync_avg / 1000)
                 .arg(warpfactor).arg(warpfactor_avg));
@@ -2500,7 +2571,9 @@ void NuppelVideoPlayer::AVSync(void)
         // by cutting the frame rate in half for the length of this frame
 
 #ifdef NEW_AVSYNC
-        avsync_adjustment = refreshrate;
+        //avsync_adjustment = refreshrate;
+        avsync_adjustment = frame_interval;
+        //avsync_adjustment = frame_interval*(((int)MAXDIVERGE)-1);
 #else
         avsync_adjustment = frame_interval;
 #endif
@@ -2510,44 +2583,49 @@ void NuppelVideoPlayer::AVSync(void)
                         "\t\t\tdoubling video frame interval to slow down.").arg(diverge));
     }
 
-    audio_lock.lock();
     if (audioOutput && normal_speed)
     {
-        long long currentaudiotime = audioOutput->GetAudiotime();
-        audio_lock.unlock();
 #if 1
-        VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString(
+        VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString(
                     "A/V timecodes audio %1 video %2 frameinterval %3 "
-                    "avdel %4 avg %5 tcoffset %6")
+                    "avdel %4 avg %5 tcoffset %6"
+                    " avp %7 avpen %8"
+                    " avdc %9"
+                    )
                 .arg(currentaudiotime)
                 .arg(buffer->timecode)
                 .arg(frame_interval)
-                .arg(buffer->timecode - currentaudiotime)
+                .arg(buffer->timecode - currentaudiotime - (int)(vsync_delay_clock*audio_stretchfactor+500)/1000)
                 .arg(avsync_avg)
                 .arg(tc_wrap[TC_AUDIO])
+                .arg(avsync_predictor)
+                .arg(avsync_predictor_enabled)
+                .arg(vsync_delay_clock)
                  );
 #endif
         if (currentaudiotime != 0 && buffer->timecode != 0)
         { // currentaudiotime == 0 after a seek
             // The time at the start of this frame (ie, now) is given by
             // last->timecode
-            int delta = (int)((buffer->timecode - prevtc)/play_speed) - (frame_interval / 1000);
-            prevtc = buffer->timecode;
-            //cerr << delta << " ";
-
-            // If the timecode is off by a frame (dropped frame) wait to sync
-            if (delta > (int) frame_interval / 1200 &&
-                delta < (int) frame_interval / 1000 * 3 &&
-                prevrp == 0)
+            if (prevtc != 0)
             {
-                //cerr << "+ ";
-                videosync->AdvanceTrigger();
-                if (m_double_framerate)
+                int delta = (int)((buffer->timecode - prevtc)/play_speed) - (frame_interval / 1000);
+                // If the timecode is off by a frame (dropped frame) wait to sync
+                if (delta > (int) frame_interval / 1200 &&
+                    delta < (int) frame_interval / 1000 * 3 &&
+                    prevrp == 0)
+                {
+                    //cerr << "+ ";
+                    VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V delay %1").arg(delta));
                     videosync->AdvanceTrigger();
+                    if (m_double_framerate)
+                        videosync->AdvanceTrigger();
+                }
             }
+            prevtc = buffer->timecode;
             prevrp = buffer->repeat_pict;
 
-            avsync_delay = (buffer->timecode - currentaudiotime) * 1000;//usec
+            avsync_delay = (buffer->timecode - currentaudiotime) * 1000 - (int)(vsync_delay_clock*audio_stretchfactor);  //usec
             // prevents major jitter when pts resets during dvd title
             if (avsync_delay > 2000000 && player_ctx->buffer->isDVD())
                 avsync_delay = 90000;
@@ -2557,15 +2635,17 @@ void NuppelVideoPlayer::AVSync(void)
                the video by one interlaced field (1/2 frame) */
             if (!lastsync)
             {
-                if (avsync_avg > frame_interval * 3 / 2)
+                if (avsync_delay > refreshrate)
                 {
-                    avsync_adjustment = refreshrate;
-                    lastsync = true;
+                    avsync_adjustment += refreshrate;
+                    //lastsync = true;
+                    VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V delay high, extend");
                 }
-                else if (avsync_avg < 0 - frame_interval * 3 / 2)
+                else if (avsync_delay < 0 - refreshrate)
                 {
-                    avsync_adjustment = -refreshrate;
-                    lastsync = true;
+                    avsync_adjustment -= refreshrate;
+                    //lastsync = true;
+                    VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + "A/V delay low, skip");
                 }
             }
             else
@@ -2573,12 +2653,13 @@ void NuppelVideoPlayer::AVSync(void)
         }
         else
         {
-            avsync_avg = 0;
-            avsync_oldavg = 0;
+            ResetAVSync();
         }
     }
     else
-        audio_lock.unlock();
+    {
+        VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, LOC + QString("A/V no sync proc ns:%1 ao:%2").arg(normal_speed).arg(audioOutput != NULL));
+    }
 }
 
 void NuppelVideoPlayer::DisplayPauseFrame(void)
@@ -4227,7 +4308,7 @@ void NuppelVideoPlayer::DoPause(void)
     }
 
     float temp_speed = audio_stretchfactor;
-    frame_interval = (int)(1000000.0 * ffrew_skip / video_frame_rate / temp_speed);
+    SetFrameInterval(m_scan, ffrew_skip / (video_frame_rate * temp_speed));
     VERBOSE(VB_PLAYBACK, QString("rate: %1 speed: %2 skip: %3 = interval %4")
                                  .arg(video_frame_rate).arg(temp_speed)
                                  .arg(ffrew_skip).arg(frame_interval));
@@ -4289,8 +4370,7 @@ void NuppelVideoPlayer::DoPlay(void)
         ClearAfterSeek();
     }
 
-    frame_interval = (int) (1000000.0f * ffrew_skip / video_frame_rate /
-                            play_speed);
+    SetFrameInterval(m_scan, ffrew_skip / (video_frame_rate * play_speed));
 
     VERBOSE(VB_PLAYBACK, LOC + "DoPlay: " +
             QString("rate: %1 speed: %2 skip: %3 => new interval %4")
@@ -4688,6 +4768,7 @@ void NuppelVideoPlayer::ClearAfterSeek(bool clearvideobuffers)
         savedAudioTimecodeOffset = 0;
     }
 
+    ResetAVSync();
     SetPrebuffering(true);
     audio_lock.lock();
     if (audioOutput)
diff --git a/mythtv/libs/libmythtv/NuppelVideoPlayer.h b/mythtv/libs/libmythtv/NuppelVideoPlayer.h
index d19ff73..dcd90f6 100644
--- a/mythtv/libs/libmythtv/NuppelVideoPlayer.h
+++ b/mythtv/libs/libmythtv/NuppelVideoPlayer.h
@@ -519,6 +519,9 @@ class MPUBLIC NuppelVideoPlayer : public CC608Reader, public CC708Reader
     float WarpFactor(void);
     void  WrapTimecode(long long &timecode, TCTypes tc_type);
     void  InitAVSync(void);
+    void  ResetAVSync(void);
+    int64_t AVSyncGetAudiotime(void);
+    void  SetFrameInterval(FrameScanType scan, double speed);
     void  AVSync(void);
     void  FallbackDeint(void);
     void  CheckExtraAudioDecode(void);
@@ -805,6 +808,9 @@ class MPUBLIC NuppelVideoPlayer : public CC608Reader, public CC708Reader
     int        avsync_adjustment;
     int        avsync_avg;
     int        avsync_oldavg;
+    bool       usesmoothsync;
+    int        avsync_predictor;
+    bool       avsync_predictor_enabled;
     int        refreshrate;
     bool       lastsync;
     bool       m_playing_slower;
diff --git a/mythtv/libs/libmythtv/avformatdecoder.cpp b/mythtv/libs/libmythtv/avformatdecoder.cpp
index e6af2d5..0f39821 100644
--- a/mythtv/libs/libmythtv/avformatdecoder.cpp
+++ b/mythtv/libs/libmythtv/avformatdecoder.cpp
@@ -481,6 +481,7 @@ AvFormatDecoder::AvFormatDecoder(NuppelVideoPlayer *parent,
       start_code_state(0xffffffff),
       lastvpts(0),                  lastapts(0),
       lastccptsu(0),
+      firstvpts(0),                 firstvptsinuse(false),
       using_null_videoout(use_null_videoout),
       video_codec_id(kCodec_NONE),
       no_hardware_decoders(no_hardware_decode),
@@ -929,6 +930,12 @@ void AvFormatDecoder::SeekReset(long long newKey, uint skipFrames,
         if (decoded_video_frame)
             GetNVP()->DiscardVideoFrame(decoded_video_frame);
     }
+
+    if (doflush)
+    {
+        firstvpts = 0;
+        firstvptsinuse = true;
+    }
 }
 
 void AvFormatDecoder::Reset(bool reset_video_data, bool seek_reset)
@@ -2929,7 +2936,9 @@ void AvFormatDecoder::MpegPreProcessPkt(AVStream *stream, AVPacket *pkt)
 
                 gopset = false;
                 prevgoppos = 0;
+                firstvpts =
                 lastapts = lastvpts = lastccptsu = 0;
+                firstvptsinuse = true;
 
                 // fps debugging info
                 float avFPS = normalized_fps(stream, context);
@@ -3039,7 +3048,9 @@ bool AvFormatDecoder::H264PreProcessPkt(AVStream *stream, AVPacket *pkt)
 
             gopset = false;
             prevgoppos = 0;
+            firstvpts =
             lastapts = lastvpts = lastccptsu = 0;
+            firstvptsinuse = true;
 
             // fps debugging info
             float avFPS = normalized_fps(stream, context);
@@ -3261,6 +3272,8 @@ bool AvFormatDecoder::ProcessVideoPacket(AVStream *curstream, AVPacket *pkt)
     framesPlayed++;
 
     lastvpts = temppts;
+    if (!firstvpts && firstvptsinuse)
+        firstvpts = temppts;
 
     return true;
 }
@@ -4029,6 +4042,16 @@ bool AvFormatDecoder::ProcessAudioPacket(AVStream *curstream, AVPacket *pkt,
                 skipaudio = false;
         }
 
+        // skip any audio frames preceding first video frame
+        if (firstvptsinuse && firstvpts && (lastapts < firstvpts))
+        {
+            VERBOSE(VB_PLAYBACK+VB_TIMESTAMP,
+                LOC + QString("discarding early audio timecode %1 %2 %3")
+                .arg(pkt->pts).arg(pkt->dts).arg(lastapts));
+            break;
+        }
+        firstvptsinuse = false;
+
         avcodeclock->lock();
         data_size = 0;
 
diff --git a/mythtv/libs/libmythtv/avformatdecoder.h b/mythtv/libs/libmythtv/avformatdecoder.h
index 90b8f58..f48bc18 100644
--- a/mythtv/libs/libmythtv/avformatdecoder.h
+++ b/mythtv/libs/libmythtv/avformatdecoder.h
@@ -262,6 +262,8 @@ class AvFormatDecoder : public DecoderBase
     long long lastvpts;
     long long lastapts;
     long long lastccptsu;
+    long long firstvpts;
+    bool      firstvptsinuse;
 
     bool using_null_videoout;
     MythCodecID video_codec_id;
diff --git a/mythtv/libs/libmythtv/vsync.cpp b/mythtv/libs/libmythtv/vsync.cpp
index 060402d..4d6a2d2 100644
--- a/mythtv/libs/libmythtv/vsync.cpp
+++ b/mythtv/libs/libmythtv/vsync.cpp
@@ -123,9 +123,10 @@ VideoSync::VideoSync(VideoOutput *video_output,
                      bool halve_frame_interval) :
     m_video_output(video_output),   m_frame_interval(frameint),
     m_refresh_interval(refreshint), m_interlaced(halve_frame_interval),
-    m_delay(-1)
+    m_nexttrigger(0),
+    m_delay(-1),
+    m_synchronous(false)
 {
-    bzero(&m_nexttrigger, sizeof(m_nexttrigger));
 
     int tolerance = m_refresh_interval / 200;
     if (m_interlaced && m_refresh_interval > ((m_frame_interval/2) + tolerance))
@@ -136,7 +137,9 @@ VideoSync::VideoSync(VideoOutput *video_output,
 
 void VideoSync::Start(void)
 {
-    gettimeofday(&m_nexttrigger, NULL); // now
+    struct timeval now_tv;
+    gettimeofday(&now_tv, NULL); // now
+    m_nexttrigger = now_tv.tv_sec * 1000000LL + now_tv.tv_usec;
 }
 
 /** \fn VideoSync::SetFrameInterval(int fr, bool intr)
@@ -147,26 +150,14 @@ void VideoSync::SetFrameInterval(int fr, bool intr)
     m_frame_interval = fr;
     m_interlaced = intr;
     int tolerance = m_refresh_interval / 200;
+    double sync_factor = fr * 2.0 / m_refresh_interval;
+    sync_factor = sync_factor - round(sync_factor);
+    m_synchronous = (sync_factor >= -0.005) && (sync_factor <= 0.005);
     if (m_interlaced && m_refresh_interval > ((m_frame_interval/2) + tolerance))
         m_interlaced = false; // can't display both fields at 2x rate
 
-    VERBOSE(VB_PLAYBACK, QString("Set video sync frame interval to %1")
-                                 .arg(m_frame_interval));
-}
-
-void VideoSync::OffsetTimeval(struct timeval& tv, int offset)
-{
-    tv.tv_usec += offset;
-    while (tv.tv_usec > 999999)
-    {
-        tv.tv_sec++;
-        tv.tv_usec -= 1000000;
-    }
-    while (tv.tv_usec < 0)
-    {
-        tv.tv_sec--;
-        tv.tv_usec += 1000000;
-    }
+    VERBOSE(VB_PLAYBACK, QString("Set video sync frame interval to %1 (synced:%2)")
+                                 .arg(m_frame_interval).arg(m_synchronous));
 }
 
 /** \fn VideoSync::UpdateNexttrigger()
@@ -179,9 +170,9 @@ void VideoSync::UpdateNexttrigger()
     // Offset by frame interval -- if interlaced, only delay by half
     // frame interval
     if (m_interlaced)
-        OffsetTimeval(m_nexttrigger, m_frame_interval/2);
+        m_nexttrigger += m_frame_interval/2;
     else
-        OffsetTimeval(m_nexttrigger, m_frame_interval);
+        m_nexttrigger += m_frame_interval;
 }
 
 /** \fn VideoSync::CalcDelay()
@@ -197,13 +188,13 @@ void VideoSync::UpdateNexttrigger()
  */
 int VideoSync::CalcDelay()
 {
-    struct timeval now;
-    gettimeofday(&now, NULL);
+    struct timeval now_tv;
+    gettimeofday(&now_tv, NULL);
     //cout << "CalcDelay: next: " << timeval_str(m_nexttrigger) << " now "
     // << timeval_str(now) << endl;
+    int64_t now = now_tv.tv_sec * 1000000LL + now_tv.tv_usec;
 
-    int ret_val = (m_nexttrigger.tv_sec - now.tv_sec) * 1000000 +
-                  (m_nexttrigger.tv_usec - now.tv_usec);
+    int ret_val = (int)(m_nexttrigger - now);
 
     //cout << "delay " << ret_val << endl;
 
@@ -215,9 +206,8 @@ int VideoSync::CalcDelay()
             ret_val = m_frame_interval * 4;
 
         // set nexttrigger to our new target time
-        m_nexttrigger.tv_sec = now.tv_sec;
-        m_nexttrigger.tv_usec = now.tv_usec;
-        OffsetTimeval(m_nexttrigger, ret_val);
+        m_nexttrigger = now;
+        m_nexttrigger += ret_val;
     }
 
     if (ret_val < -m_frame_interval)
@@ -225,9 +215,8 @@ int VideoSync::CalcDelay()
         ret_val = -m_frame_interval;
 
         // set nexttrigger to our new target time
-        m_nexttrigger.tv_sec = now.tv_sec;
-        m_nexttrigger.tv_usec = now.tv_usec;
-        OffsetTimeval(m_nexttrigger, ret_val);
+        m_nexttrigger = now;
+        m_nexttrigger += ret_val;
     }
 
     return ret_val;
@@ -244,10 +233,20 @@ int VideoSync::CalcDelay()
 void VideoSync::KeepPhase()
 {
     // cerr << m_delay << endl;
-    if (m_delay < -(m_refresh_interval/2))
-        OffsetTimeval(m_nexttrigger, 200);
-    else if (m_delay > -500)
-        OffsetTimeval(m_nexttrigger, -2000);
+    if (m_synchronous)
+    {
+        if (m_delay < -(m_refresh_interval - 500))
+            m_nexttrigger += 200;
+        else if (m_delay > -500)
+            m_nexttrigger += -2000;
+    }
+    else
+    {
+        if (m_delay < -(m_refresh_interval + 500))
+            m_nexttrigger += 200;
+        else if (m_delay >= 0)
+            m_nexttrigger += -2000;
+    }
 }
 
 #ifndef _WIN32
@@ -337,10 +336,10 @@ void DRMVideoSync::Start(void)
     VideoSync::Start();
 }
 
-void DRMVideoSync::WaitForFrame(int sync_delay)
+int DRMVideoSync::WaitForFrame(int sync_delay)
 {
     // Offset for externally-provided A/V sync delay
-    OffsetTimeval(m_nexttrigger, sync_delay);
+    m_nexttrigger += sync_delay;
 
     m_delay = CalcDelay();
     //cerr << "WaitForFrame at : " << m_delay;
@@ -360,7 +359,7 @@ void DRMVideoSync::WaitForFrame(int sync_delay)
     if (m_delay > 0)
     {
         // Wait for any remaining retrace intervals in one pass.
-        int n = m_delay / m_refresh_interval + 1;
+        int n = (m_delay + m_refresh_interval - 1) / m_refresh_interval;
 
         drm_wait_vblank_t blank;
         blank.request.type = DRM_VBLANK_RELATIVE;
@@ -370,6 +369,7 @@ void DRMVideoSync::WaitForFrame(int sync_delay)
         //cerr << "Wait " << n << " intervals. Count " << blank.request.sequence;
         //cerr  << " Delay " << m_delay << endl;
     }
+    return m_delay;
 }
 
 void DRMVideoSync::AdvanceTrigger(void)
@@ -497,12 +497,16 @@ void OpenGLVideoSync::Start(void)
 #endif /* USING_OPENGL_VSYNC */
 }
 
-void OpenGLVideoSync::WaitForFrame(int sync_delay)
+int OpenGLVideoSync::WaitForFrame(int sync_delay)
 {
     (void) sync_delay;
 #ifdef USING_OPENGL_VSYNC
+//#define GLVSYNCDEBUG
+#ifdef GLVSYNCDEBUG
+    int refreshcount = 0;
+#endif
     const QString msg1("First A/V Sync"), msg2("Second A/V Sync");
-    OffsetTimeval(m_nexttrigger, sync_delay);
+    m_nexttrigger += sync_delay;
 
     VideoOutput *vo = dynamic_cast<VideoOutput*>(m_video_output);
     if (vo && vo->IsEmbedding())
@@ -510,36 +514,68 @@ void OpenGLVideoSync::WaitForFrame(int sync_delay)
         m_delay = CalcDelay();
         if (m_delay > 0)
             usleep(m_delay);
-        return;
+        return 0;
     }
 
     int err;
     if (!m_context)
-        return;
+        return 0;
     unsigned int frameNum = 0;
 
     OpenGLContextLocker ctx_lock(m_context);
     err = gMythGLXGetVideoSyncSGI(&frameNum);
     checkGLSyncError("Frame Number Query", err);
 
+#ifdef GLVSYNCDEBUG
+    int delay1 = m_delay;
+    int delay2;
+#endif
     // Always sync to the next retrace execpt when we are very late.
     if ((m_delay = CalcDelay()) > -(m_refresh_interval/2))
     {
+#ifdef GLVSYNCDEBUG
+        delay2 = m_delay;
+#endif
         err = gMythGLXWaitVideoSyncSGI(2, (frameNum+1)%2 ,&frameNum);
         checkGLSyncError(msg1, err);
         m_delay = CalcDelay();
+#ifdef GLVSYNCDEBUG
+        refreshcount++;
+#endif
     }
+#ifdef GLVSYNCDEBUG
+    else
+        delay2 = m_delay;
+#endif
 
+#ifdef GLVSYNCDEBUG
+    int delay3 = m_delay;
+#endif
     // Wait for any remaining retrace intervals in one pass.
     if (m_delay > 0)
     {
-        uint n = m_delay / m_refresh_interval + 1;
+        uint n = (m_delay + m_refresh_interval - 1) / m_refresh_interval;
+#ifdef GLVSYNCDEBUG
+        refreshcount += (int)n;
+#endif
         err = gMythGLXWaitVideoSyncSGI((n+1), (frameNum+n)%(n+1), &frameNum);
         checkGLSyncError(msg2, err);
         m_delay = CalcDelay();
     }
+#ifdef GLVSYNCDEBUG
+    VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, QString("VS: WFF: ri:%1 fi:%2 delay1:%3 delay2:%4 delay3:%5 skip:%6 finaldelay:%7")
+            .arg(m_refresh_interval)
+            .arg(m_frame_interval)
+            .arg(delay1)
+            .arg(delay2)
+            .arg(delay3)
+            .arg(refreshcount)
+            .arg(m_delay)
+           );
+#endif
 
 #endif /* USING_OPENGL_VSYNC */
+    return m_delay;
 }
 
 void OpenGLVideoSync::AdvanceTrigger(void)
@@ -548,6 +584,9 @@ void OpenGLVideoSync::AdvanceTrigger(void)
 
     KeepPhase();
     UpdateNexttrigger();
+#ifdef GLVSYNCDEBUG
+    VERBOSE(VB_PLAYBACK+VB_TIMESTAMP, "VS: AdvanceTrigger");
+#endif
 #endif /* USING_OPENGL_VSYNC */
 }
 #endif /* !_WIN32 */
@@ -594,9 +633,9 @@ bool RTCVideoSync::TryInit(void)
     return true;
 }
 
-void RTCVideoSync::WaitForFrame(int sync_delay)
+int RTCVideoSync::WaitForFrame(int sync_delay)
 {
-    OffsetTimeval(m_nexttrigger, sync_delay);
+    m_nexttrigger += sync_delay;
 
     m_delay = CalcDelay();
 
@@ -609,6 +648,7 @@ void RTCVideoSync::WaitForFrame(int sync_delay)
         if ((val < 0) && (m_delay > 0))
             usleep(m_delay);
     }
+    return 0;
 }
 
 void RTCVideoSync::AdvanceTrigger(void)
@@ -637,10 +677,10 @@ bool VDPAUVideoSync::TryInit(void)
     return true;
 }
 
-void VDPAUVideoSync::WaitForFrame(int sync_delay)
+int VDPAUVideoSync::WaitForFrame(int sync_delay)
 {
     // Offset for externally-provided A/V sync delay
-    OffsetTimeval(m_nexttrigger, sync_delay);
+    m_nexttrigger += sync_delay;
     m_delay = CalcDelay();
 
     if (m_delay < 0)
@@ -648,6 +688,7 @@ void VDPAUVideoSync::WaitForFrame(int sync_delay)
 
     VideoOutputVDPAU *vo = (VideoOutputVDPAU *)(m_video_output);
     vo->SetNextFrameDisplayTimeOffset(m_delay);
+    return 0;
 }
 
 void VDPAUVideoSync::AdvanceTrigger(void)
@@ -674,10 +715,10 @@ bool BusyWaitVideoSync::TryInit(void)
     return true;
 }
 
-void BusyWaitVideoSync::WaitForFrame(int sync_delay)
+int BusyWaitVideoSync::WaitForFrame(int sync_delay)
 {
     // Offset for externally-provided A/V sync delay
-    OffsetTimeval(m_nexttrigger, sync_delay);
+    m_nexttrigger += sync_delay;
 
     m_delay = CalcDelay();
 
@@ -703,6 +744,7 @@ void BusyWaitVideoSync::WaitForFrame(int sync_delay)
         if (cnt > 1)
             m_cheat -= 200;
     }
+    return 0;
 }
 
 void BusyWaitVideoSync::AdvanceTrigger(void)
@@ -725,14 +767,15 @@ bool USleepVideoSync::TryInit(void)
     return true;
 }
 
-void USleepVideoSync::WaitForFrame(int sync_delay)
+int USleepVideoSync::WaitForFrame(int sync_delay)
 {
     // Offset for externally-provided A/V sync delay
-    OffsetTimeval(m_nexttrigger, sync_delay);
+    m_nexttrigger += sync_delay;
 
     m_delay = CalcDelay();
     if (m_delay > 0)
         usleep(m_delay);
+    return 0;
 }
 
 void USleepVideoSync::AdvanceTrigger(void)
diff --git a/mythtv/libs/libmythtv/vsync.h b/mythtv/libs/libmythtv/vsync.h
index f077949..f8b1c4b 100644
--- a/mythtv/libs/libmythtv/vsync.h
+++ b/mythtv/libs/libmythtv/vsync.h
@@ -70,6 +70,7 @@ class VideoSync
     virtual void Start(void);
 
     /** \brief Waits for next a frame or field.
+     *   Returns the remaining delay to the true frame display time, in usec.
      *
      *   Start(void), WaitForFrame(void), and Stop(void) should
      *   always be called from same thread, to prevent bad
@@ -78,7 +79,7 @@ class VideoSync
      *  \param sync_delay time until the desired frame or field
      *  \sa CalcDelay(void), KeepPhase(void)
      */
-    virtual void WaitForFrame(int sync_delay) = 0;
+    virtual int WaitForFrame(int sync_delay) = 0;
 
     /// \brief Use the next frame or field for CalcDelay(void)
     ///        and WaitForFrame(int).
@@ -104,7 +105,6 @@ class VideoSync
                                  uint frame_interval, uint refresh_interval,
                                  bool interlaced);
   protected:
-    static void OffsetTimeval(struct timeval& tv, int offset);
     void UpdateNexttrigger(void);
     int CalcDelay(void);
     void KeepPhase(void);
@@ -113,8 +113,9 @@ class VideoSync
     int m_frame_interval; // of video
     int m_refresh_interval; // of display
     bool m_interlaced;
-    struct timeval m_nexttrigger;
+    int64_t m_nexttrigger;
     int m_delay;
+    bool m_synchronous;
     
     static int m_forceskip;
 };
@@ -136,7 +137,7 @@ class DRMVideoSync : public VideoSync
     QString getName(void) const { return QString("DRM"); }
     bool TryInit(void);
     void Start(void);
-    void WaitForFrame(int sync_delay);
+    int WaitForFrame(int sync_delay);
     void AdvanceTrigger(void);
 
   private:
@@ -178,7 +179,7 @@ class OpenGLVideoSync : public VideoSync
     QString getName(void) const { return QString("SGI OpenGL"); }
     bool TryInit(void);
     void Start(void);
-    void WaitForFrame(int sync_delay);
+    int WaitForFrame(int sync_delay);
     void AdvanceTrigger(void);
 
   private:
@@ -207,7 +208,7 @@ class RTCVideoSync : public VideoSync
 
     QString getName(void) const { return QString("RTC"); }
     bool TryInit(void);
-    void WaitForFrame(int sync_delay);
+    int WaitForFrame(int sync_delay);
     void AdvanceTrigger(void);
 
   private:
@@ -228,7 +229,7 @@ class VDPAUVideoSync : public VideoSync
 
     QString getName(void) const { return QString("VDPAU"); }
     bool TryInit(void);
-    void WaitForFrame(int sync_delay);
+    int WaitForFrame(int sync_delay);
     void AdvanceTrigger(void);
 
   private:
@@ -256,7 +257,7 @@ class BusyWaitVideoSync : public VideoSync
 
     QString getName(void) const { return QString("USleep with busy wait"); }
     bool TryInit(void);
-    void WaitForFrame(int sync_delay);
+    int WaitForFrame(int sync_delay);
     void AdvanceTrigger(void);
 
   private:
@@ -284,7 +285,7 @@ class USleepVideoSync : public VideoSync
 
     QString getName(void) const { return QString("USleep"); }
     bool TryInit(void);
-    void WaitForFrame(int sync_delay);
+    int WaitForFrame(int sync_delay);
     void AdvanceTrigger(void);
 };
 #endif /* VSYNC_H_INCLUDED */
