+ mSmoothingAmount( DEFAULT_SMOOTHING_AMOUNT[0] ),
+ mUseActualTimes( DEFAULT_USE_ACTUAL_TIMES ),
+ mInterpolationTimeRange( DEFAULT_INTERPOLATION_TIME_RANGE ),
+ mScalarOnlyPredictionEnabled( DEFAULT_SCALAR_ONLY_PREDICTION_ENABLED ),
+ mTwoPointPredictionEnabled( DEFAULT_TWO_POINT_PREDICTION_ENABLED ),
+ mTwoPointPastInterpolateTime( DEFAULT_TWO_POINT_PAST_INTERPOLATE_TIME ),
+ mTwoPointVelocityBias( DEFAULT_TWO_POINT_VELOCITY_BIAS ),
+ mTwoPointAccelerationBias( DEFAULT_TWO_POINT_ACCELERATION_BIAS ),
+ mMultiTapSmoothingRange( DEFAULT_MULTITAP_SMOOTHING_RANGE )
+{
+
+}
+
+// Prediction mode 2 related code and functions follow:
+
+// Drains all pending pan events from the circular buffer (mGestures) into the
+// linear buffer mReadGestures while holding the mutex, so later processing can
+// run without the lock held.
+// @return The number of events copied into mReadGestures this frame.
+unsigned int PanGesture::ReadFrameEvents()
+{
+ unsigned int eventsThisFrame;
+ // Copy the events into a linear buffer while holding the mutex.
+ // This is so the lock is not held while any processing is done.
+ Dali::Mutex::ScopedLock lock( mMutex );
+ for( eventsThisFrame = 0; mReadPosition != mWritePosition; ++eventsThisFrame )
+ {
+ mReadGestures[ eventsThisFrame ] = mGestures[ mReadPosition ];
+ ++mReadPosition;
+ // Wrap the read index: mGestures is a circular buffer of PAN_GESTURE_HISTORY entries.
+ mReadPosition %= PAN_GESTURE_HISTORY;
+ }
+ return eventsThisFrame;
+}
+
+// Converts the variable-rate input events read this frame into one gesture for
+// the current update frame: multiple events are averaged together, and when no
+// events arrived an event is estimated from the previous frame's data so the
+// gesture does not stall mid-pan.
+// @param[out] rateConvertedGesture The averaged (or estimated) gesture for this frame.
+// @param[in]  eventsThisFrame      Number of valid entries in mReadGestures.
+// @param[in]  currentFrameTime     Timestamp of the current frame.
+// @param[in]  lastFrameTime        Timestamp of the previous frame.
+// @param[out] justStarted          Set true if a Started event was seen this frame.
+// @param[out] justFinished         Set true if a Finished or Cancelled event was seen.
+// @return False if there is nothing to process this frame (no events and not in a
+//         gesture), true otherwise.
+// TODO: eventsThisFrame parameter can be removed if we use a smarter container.
+bool PanGesture::InputRateConversion( PanInfo& rateConvertedGesture, unsigned int eventsThisFrame,
+ unsigned int currentFrameTime, unsigned int lastFrameTime, bool& justStarted, bool& justFinished )
+{
+ // TODO: Lots of variables on the stack. Needs optimizing.
+ PanInfo readGesture;
+ PanInfo firstReadGesture;
+ unsigned int eventsKeptThisFrame = 0;
+
+ for( unsigned int readPosition = 0; readPosition < eventsThisFrame; ++readPosition )
+ {
+ // Copy the gesture first
+ readGesture = mReadGestures[ readPosition ];
+
+ if( mProfiling )
+ {
+ // Record the raw input point for offline analysis when profiling is enabled.
+ mProfiling->mRawData.push_back( PanGestureProfiling::Position( readGesture.time, readGesture.screen.position,
+ readGesture.screen.displacement, readGesture.screen.velocity, readGesture.state ) );
+ }
+
+ if( readGesture.state == Gesture::Started )
+ {
+ // A new gesture has begun: reset all history and derived state so the new
+ // gesture is not influenced by the previous one.
+ // Clear pan data.
+ mPanHistory.clear();
+ mPredictionHistory.clear();
+ mLastAcceleration.local = Vector2::ZERO;
+ mLastAcceleration.screen = Vector2::ZERO;
+ mLastInterpolatedAcceleration.local = Vector2::ZERO;
+ mLastInterpolatedAcceleration.screen = Vector2::ZERO;
+ mLastInitialAcceleration.local = Vector2::ZERO;
+ mLastInitialAcceleration.screen = Vector2::ZERO;
+ // Default-constructed PanInfo is used to zero out the cached gesture state.
+ PanInfo startInfo;
+ mLastGesture = startInfo;
+ mLastSecondInterpolatedPoint = startInfo;
+ mLastPredictedPoint = startInfo;
+ mLastFrameReadGesture = startInfo;
+ rateConvertedGesture = startInfo;
+ // NOTE(review): firstReadGesture is written here but never read in this
+ // function — confirm whether it is still needed.
+ firstReadGesture = readGesture;
+ eventsKeptThisFrame = 0;
+ mNotAtTarget = false;
+ justFinished = false;
+ justStarted = true;
+ mInGesture = true;
+
+ if( !mPredictionAmountOverridden )
+ {
+ // If the prediction amount has not been modified, default to the correct amount for this algorithm.
+ mPredictionAmount = DEFAULT_PREDICTION_AMOUNT[1];
+ }
+ mCurrentPredictionAmount = mPredictionAmount;
+ }
+ else
+ {
+ // Cancelled is treated the same as Finished for the purpose of ending the pan.
+ justFinished |= ( readGesture.state == Gesture::Finished || readGesture.state == Gesture::Cancelled );
+ }
+
+ // Accumulate this event; totals are averaged below if more than one event was kept.
+ rateConvertedGesture.screen.position += readGesture.screen.position;
+ rateConvertedGesture.local.position += readGesture.local.position;
+ rateConvertedGesture.screen.velocity += readGesture.screen.velocity;
+ rateConvertedGesture.local.velocity += readGesture.local.velocity;
+ rateConvertedGesture.screen.displacement += readGesture.screen.displacement;
+ rateConvertedGesture.local.displacement += readGesture.local.displacement;
+
+ ++eventsKeptThisFrame;
+ }
+
+ bool storeGesture = false;
+ if( eventsKeptThisFrame > 0 )
+ {
+ // Some events were read this frame.
+ if( eventsKeptThisFrame > 1 )
+ {
+ // Average the accumulated totals into a single representative event.
+ float eventDivisor( eventsKeptThisFrame );
+ rateConvertedGesture.screen.position /= eventDivisor;
+ rateConvertedGesture.local.position /= eventDivisor;
+ rateConvertedGesture.screen.velocity /= eventDivisor;
+ rateConvertedGesture.local.velocity /= eventDivisor;
+ rateConvertedGesture.screen.displacement /= eventDivisor;
+ rateConvertedGesture.local.displacement /= eventDivisor;
+
+ // Remember the last read event as the target we are still converging towards.
+ mTargetGesture = readGesture;
+ mNotAtTarget = true;
+ }
+ else
+ {
+ mNotAtTarget = false;
+ }
+
+ rateConvertedGesture.time = currentFrameTime;
+ storeGesture = true;
+ }
+ else
+ {
+ // We did not get any event this frame.
+ // If we just started (or aren't in a gesture), exit.
+ if( !mInGesture || justStarted )
+ {
+ // We cannot guess what the event could be as we have no other events to base the guess from.
+ return false;
+ }
+
+ // As we are currently in a gesture, we can estimate an event.
+ readGesture = mLastFrameReadGesture;
+ readGesture.time = currentFrameTime;
+
+ // Take the last event, halve the acceleration, and use that.
+ // NOTE: the '/=' deliberately halves the stored mLastAcceleration in place,
+ // so the acceleration decays further on each successive event-less frame.
+ const float accelerationDegrade = 2.0f;
+ Vector2 degradedAccelerationLocal( mLastAcceleration.local /= accelerationDegrade );
+ Vector2 degradedAccelerationScreen( mLastAcceleration.screen /= accelerationDegrade );
+
+ float outputTimeGranularity( GetDivisibleTimeDifference( currentFrameTime, lastFrameTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+
+ // Integrate the degraded acceleration over the frame delta to synthesise
+ // velocity, displacement and position for the estimated event.
+ readGesture.local.velocity = degradedAccelerationLocal * outputTimeGranularity;
+ readGesture.local.displacement = readGesture.local.velocity * outputTimeGranularity;
+ readGesture.local.position = mLastFrameReadGesture.local.position + readGesture.local.displacement;
+ readGesture.screen.velocity = degradedAccelerationScreen * outputTimeGranularity;
+ readGesture.screen.displacement = readGesture.screen.velocity * outputTimeGranularity;
+ readGesture.screen.position = mLastFrameReadGesture.screen.position + readGesture.screen.displacement;
+
+ rateConvertedGesture = readGesture;
+ eventsKeptThisFrame = 1;
+ storeGesture = true;
+ }
+
+ if( eventsKeptThisFrame > 0 )
+ {
+ // Store last read gesture.
+ readGesture.time = currentFrameTime;
+ mLastFrameReadGesture = readGesture;
+
+ if( eventsKeptThisFrame > 2 )
+ {
+ DALI_LOG_WARNING( "Got events this frame:%d (more than 2 will compromise result)\n", eventsKeptThisFrame );
+ }
+ }
+
+ if( storeGesture )
+ {
+ // Store final converted result.
+ mPanHistory.push_back( rateConvertedGesture );
+ }
+ return true;
+}
+
+// Generates a single interpolated point from the given point history.
+// All points within 'range' of 'targetTime' are blended with inverse-time
+// weighting (weight = 1 / distance-from-targetTime), and a smoothed
+// acceleration is derived from consecutive velocity differences.
+// @param[in,out] history        Point history to read; pruned of old points if eraseUnused.
+// @param[in]  currentTime       Unused in this implementation.
+// @param[in]  targetTime        The time the generated point should represent.
+// @param[in]  range             Window (in time units) either side of targetTime to include.
+// @param[out] outPoint          Receives the interpolated point (time set to targetTime).
+// @param[in,out] acceleration   In: previous acceleration (for smoothing). Out: new acceleration.
+// @param[in]  outputTimeGranularity Frame time delta, used when only one point is available.
+// @param[in]  eraseUnused       If true, points older than the window are erased from history.
+// @return True if a point was generated (at least one history point was usable).
+bool PanGesture::InterpolatePoint( PanInfoHistory& history, unsigned int currentTime, unsigned int targetTime, unsigned int range,
+ PanInfo& outPoint, RelativeVectors& acceleration, int outputTimeGranularity, bool eraseUnused )
+{
+ unsigned int discardInputBufferTime = targetTime + range;
+ unsigned int maxHistoryTime = targetTime - range;
+ unsigned int tapsUsed = 0;
+ outPoint.time = targetTime;
+ float divisor = 0.0f;
+ float accelerationDivisor = 0.0f;
+ PanInfoHistoryIter historyBegin = history.begin();
+ PanInfoHistoryIter lastIt = history.end();
+ bool pointGenerated = false;
+ RelativeVectors newAcceleration;
+
+ // Iterate through point history to perform interpolation.
+ for( PanInfoHistoryIter it = historyBegin; it != history.end(); )
+ {
+ unsigned int gestureTime = it->time;
+
+ if( gestureTime < maxHistoryTime )
+ {
+ // Too far in the past, discard.
+ // Clean history as we go (if requested).
+ if( eraseUnused )
+ {
+ it = history.erase( it );
+ }
+ else
+ {
+ ++it;
+ continue;
+ }
+ }
+ else
+ {
+ if( gestureTime > discardInputBufferTime )
+ {
+ // Too far in the future, discard (and finish).
+ break;
+ }
+
+ // Weight is the inverse of the point's time distance from the target:
+ // closer points contribute more to the result.
+ float timeDelta( static_cast<float>( abs( int( targetTime - gestureTime ) ) ) );
+ // Handle low time deltas.
+ if( timeDelta < 1.0f )
+ {
+ timeDelta = 1.0f;
+ }
+
+ outPoint.local.position += it->local.position / timeDelta;
+ outPoint.screen.position += it->screen.position / timeDelta;
+ outPoint.local.velocity += it->local.velocity / timeDelta;
+ outPoint.screen.velocity += it->screen.velocity / timeDelta;
+ outPoint.local.displacement += it->local.displacement / timeDelta;
+ outPoint.screen.displacement += it->screen.displacement / timeDelta;
+
+ divisor += 1.0f / timeDelta;
+
+ // Acceleration requires a previous point.
+ if( lastIt != history.end() )
+ {
+ // Time delta of input.
+ float timeDifference( GetDivisibleTimeDifference( it->time, lastIt->time, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+
+ // Acceleration sample = velocity change / time, weighted like the point itself.
+ newAcceleration.local += ( ( it->local.velocity - lastIt->local.velocity ) / timeDifference ) / timeDelta;
+ newAcceleration.screen += ( ( it->screen.velocity - lastIt->screen.velocity ) / timeDifference ) / timeDelta;
+
+ accelerationDivisor += 1.0f / timeDelta;
+ }
+
+ tapsUsed++;
+ lastIt = it;
+ ++it;
+ }
+ }
+
+ // Divide results by their respective divisors.
+ if( tapsUsed > 0 )
+ {
+ if( divisor > 0.0f )
+ {
+ outPoint.local.position /= divisor;
+ outPoint.screen.position /= divisor;
+ outPoint.local.velocity /= divisor;
+ outPoint.screen.velocity /= divisor;
+ outPoint.local.displacement /= divisor;
+ outPoint.screen.displacement /= divisor;
+ }
+
+ if( tapsUsed > 1 )
+ {
+ if( accelerationDivisor > 0.0f )
+ {
+ newAcceleration.local /= accelerationDivisor;
+ newAcceleration.screen /= accelerationDivisor;
+ }
+
+ // Blend the new acceleration with the previous one to smooth out spikes.
+ float accelerationSmoothing( ACCELERATION_SMOOTHING );
+ newAcceleration.local = ( acceleration.local * accelerationSmoothing ) + ( newAcceleration.local * ( 1.0f - accelerationSmoothing ) );
+ newAcceleration.screen = ( acceleration.screen * accelerationSmoothing ) + ( newAcceleration.screen * ( 1.0f - accelerationSmoothing ) );
+ }
+ else
+ {
+ // If we just started, last velocity was 0. So difference of zero to current velocity over time gives acceleration of the first point.
+ newAcceleration.local = outPoint.local.velocity / outputTimeGranularity;
+ newAcceleration.screen = outPoint.screen.velocity / outputTimeGranularity;
+ }
+ pointGenerated = true;
+ }
+
+ // Always write back the acceleration (zero if no point was generated).
+ acceleration.local = newAcceleration.local;
+ acceleration.screen = newAcceleration.screen;
+ return pointGenerated;
+}
+
+// Returns the time difference to divide by when converting displacements and
+// velocities. When mUseActualTimes is set, this is |timeA - timeB| clamped to
+// at least minimumDelta (avoiding divide-by-zero); otherwise the fixed
+// overrideDifference (an assumed constant frame delta) is returned.
+float PanGesture::GetDivisibleTimeDifference( int timeA, int timeB, float minimumDelta, float overrideDifference )
+{
+ float timeDifference( overrideDifference );
+ if( mUseActualTimes )
+ {
+ timeDifference = static_cast<float>( abs( timeA - timeB ) );
+ if( timeDifference < minimumDelta )
+ {
+ timeDifference = minimumDelta;
+ }
+ }
+ return timeDifference;
+}
+
+// Limits how much currentAcceleration may differ from lastAcceleration: the
+// per-component delta is clamped to [-changeLimit, changeLimit] and the
+// limited result is written back into currentAcceleration.
+// @param[in,out] currentAcceleration New acceleration, capped in place.
+// @param[in]  lastAcceleration       Baseline acceleration from the previous step.
+// @param[in]  changeLimit            Maximum allowed change per axis.
+void PanGesture::LimitAccelerationChange( RelativeVectors& currentAcceleration, RelativeVectors& lastAcceleration, float changeLimit )
+{
+ // We don't use the float parameter version of clamp here, as that will create the capping vectors twice in total.
+ Vector2 capMinimum( -changeLimit, -changeLimit );
+ Vector2 capMaximum( changeLimit, changeLimit );
+ Vector2 accelerationDeltaLocal( currentAcceleration.local - lastAcceleration.local );
+ Vector2 accelerationDeltaScreen( currentAcceleration.screen - lastAcceleration.screen );
+ accelerationDeltaLocal.Clamp( capMinimum, capMaximum );
+ accelerationDeltaScreen.Clamp( capMinimum, capMaximum );
+ currentAcceleration.local = lastAcceleration.local + accelerationDeltaLocal;
+ currentAcceleration.screen = lastAcceleration.screen + accelerationDeltaScreen;
+}
+
+// Predicts a future gesture point from startPoint using one of two schemes:
+// - Scalar-only (mScalarOnlyPredictionEnabled): position += velocity * scaled amount.
+// - Integration: position += velocity*t + 0.5*a*t^2 with t = mCurrentPredictionAmount.
+// Output displacement/velocity are then derived relative to the last predicted
+// point (or to startPoint when noPreviousData is true), and the result is
+// cached in mLastPredictedPoint.
+// @param[in]  startPoint        Point to predict forwards from.
+// @param[in]  accelerationToUse Acceleration used by the integration scheme.
+// @param[out] predictedPoint    Receives the predicted point (time = currentFrameTime).
+// @param[in]  currentFrameTime  Current frame timestamp.
+// @param[in]  previousFrameTime Previous frame timestamp.
+// @param[in]  noPreviousData    True when there is no prior predicted point to diff against.
+void PanGesture::PredictionMode2( PanInfo& startPoint, RelativeVectors& accelerationToUse,
+ PanInfo& predictedPoint, unsigned int currentFrameTime, unsigned int previousFrameTime, bool noPreviousData )
+{
+ // Do the prediction (based on mode).
+ if( mScalarOnlyPredictionEnabled )
+ {
+ // We are doing scalar based prediction.
+ // This divisor is to help tuning by giving the scalar only result
+ // a similar prediction amount to the integrated result.
+ float scalarVelocityMultiplier = static_cast<float>( mCurrentPredictionAmount ) / 1.364f;
+ predictedPoint.local.position = startPoint.local.position + ( startPoint.local.velocity * scalarVelocityMultiplier );
+ predictedPoint.screen.position = startPoint.screen.position + ( startPoint.screen.velocity * scalarVelocityMultiplier );
+ }
+ else
+ {
+ // We are doing integration based prediction.
+ // Standard kinematic integration: s = s0 + v*t + 0.5*a*t^2.
+ float predictionDelta( mCurrentPredictionAmount );
+
+ predictedPoint.local.position = startPoint.local.position + ( startPoint.local.velocity * predictionDelta ) +
+ ( accelerationToUse.local * ( predictionDelta * predictionDelta * 0.5f ) );
+ predictedPoint.screen.position = startPoint.screen.position + ( startPoint.screen.velocity * predictionDelta ) +
+ ( accelerationToUse.screen * ( predictionDelta * predictionDelta * 0.5f ) );
+ }
+
+ // Calculate remaining gesture data from the result.
+ float timeDifference( GetDivisibleTimeDifference( currentFrameTime, previousFrameTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+ if( noPreviousData )
+ {
+ // No previous prediction exists, so measure displacement from the start point.
+ predictedPoint.local.displacement = predictedPoint.local.position - startPoint.local.position;
+ predictedPoint.screen.displacement = predictedPoint.screen.position - startPoint.screen.position;
+ }
+ else
+ {
+ predictedPoint.local.displacement = predictedPoint.local.position - mLastPredictedPoint.local.position;
+ predictedPoint.screen.displacement = predictedPoint.screen.position - mLastPredictedPoint.screen.position;
+ }
+ predictedPoint.local.velocity = predictedPoint.local.displacement / timeDifference;
+ predictedPoint.screen.velocity = predictedPoint.screen.displacement / timeDifference;
+
+ // TODO: Experimental - not used at run time. Left in code for reference only.
+ if( TEST_TUNE_ENABLE_OVERSHOOT_PROTECTION )
+ {
+ // Overshoot protection
+ // If the predicted velocity reverses sign on an axis versus the last
+ // prediction, hold that axis at the previous position and reset history.
+ if( !noPreviousData )
+ {
+ if( ( mLastPredictedPoint.local.velocity.x > Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.x < Math::MACHINE_EPSILON_0 )
+ || ( mLastPredictedPoint.local.velocity.x < Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.x > Math::MACHINE_EPSILON_0 ) )
+ {
+ predictedPoint.local.position.x = mLastPredictedPoint.local.position.x;
+ predictedPoint.screen.position.x = mLastPredictedPoint.screen.position.x;
+ mPredictionHistory.clear();
+ }
+ if( ( mLastPredictedPoint.local.velocity.y > Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.y < Math::MACHINE_EPSILON_0 )
+ || ( mLastPredictedPoint.local.velocity.y < Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.y > Math::MACHINE_EPSILON_0 ) )
+ {
+ predictedPoint.local.position.y = mLastPredictedPoint.local.position.y;
+ predictedPoint.screen.position.y = mLastPredictedPoint.screen.position.y;
+ mPredictionHistory.clear();
+ }
+ }
+ }
+
+ predictedPoint.time = currentFrameTime;
+ mLastPredictedPoint = predictedPoint;
+}
+
+// TODO: This needs a better name! It is called this instead of prediction mode 2 because:
+// 1) It is the entire workflow, not just prediction.
+// 2) To make it less confusing as there is a function that does prediction alone called PredictionMode2.
+// Ultimately we need to combine the old and new code modularly so there is one code path that can optionally run different functions based on configuration.
+// At the moment, the differences between the inputs & outputs of these different functions prevent that, but this can be resolved.
+bool PanGesture::NewAlgorithm( unsigned int lastVSyncTime, unsigned int nextVSyncTime )