+ float timeDifference( overrideDifference );
+ if( mUseActualTimes )
+ {
+ timeDifference = static_cast<float>( abs( timeA - timeB ) );
+ if( timeDifference < minimumDelta )
+ {
+ timeDifference = minimumDelta;
+ }
+ }
+ return timeDifference;
+}
+
+void PanGesture::LimitAccelerationChange( RelativeVectors& currentAcceleration, RelativeVectors& lastAcceleration, float changeLimit )
+{
+ // Cap the per-axis change in acceleration since last frame to +/- changeLimit.
+ // The vector overload of Clamp is used so the capping vectors are only built
+ // once each (the float-parameter version would create them twice in total).
+ Vector2 lowerCap( -changeLimit, -changeLimit );
+ Vector2 upperCap( changeLimit, changeLimit );
+
+ Vector2 localDelta( currentAcceleration.local - lastAcceleration.local );
+ localDelta.Clamp( lowerCap, upperCap );
+ currentAcceleration.local = lastAcceleration.local + localDelta;
+
+ Vector2 screenDelta( currentAcceleration.screen - lastAcceleration.screen );
+ screenDelta.Clamp( lowerCap, upperCap );
+ currentAcceleration.screen = lastAcceleration.screen + screenDelta;
+}
+
+void PanGesture::PredictionMode2( PanInfo& startPoint, RelativeVectors& accelerationToUse,
+    PanInfo& predictedPoint, unsigned int currentFrameTime, unsigned int previousFrameTime, bool noPreviousData )
+{
+ // Predict a future point from the start point, using either scalar velocity
+ // extrapolation or velocity + acceleration integration (based on mode).
+ if( mScalarOnlyPredictionEnabled )
+ {
+ // Scalar-only prediction: extrapolate along the current velocity only.
+ // The 1.364 divisor is a tuning aid that gives the scalar-only result a
+ // similar prediction amount to the integrated result.
+ float velocityScale = static_cast<float>( mCurrentPredictionAmount ) / 1.364f;
+ predictedPoint.local.position = startPoint.local.position + ( startPoint.local.velocity * velocityScale );
+ predictedPoint.screen.position = startPoint.screen.position + ( startPoint.screen.velocity * velocityScale );
+ }
+ else
+ {
+ // Integration-based prediction: p = p0 + v.t + 0.5*a*t^2.
+ float dt = static_cast<float>( mCurrentPredictionAmount );
+ float halfDtSquared = dt * dt * 0.5f;
+ predictedPoint.local.position = startPoint.local.position + ( startPoint.local.velocity * dt ) + ( accelerationToUse.local * halfDtSquared );
+ predictedPoint.screen.position = startPoint.screen.position + ( startPoint.screen.velocity * dt ) + ( accelerationToUse.screen * halfDtSquared );
+ }
+
+ // Derive displacement and velocity from the predicted position.
+ // With no previous prediction available, displacement is measured from the
+ // start point instead of the last predicted point.
+ float frameDelta( GetDivisibleTimeDifference( currentFrameTime, previousFrameTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+ const PanInfo& referencePoint = noPreviousData ? startPoint : mLastPredictedPoint;
+ predictedPoint.local.displacement = predictedPoint.local.position - referencePoint.local.position;
+ predictedPoint.screen.displacement = predictedPoint.screen.position - referencePoint.screen.position;
+ predictedPoint.local.velocity = predictedPoint.local.displacement / frameDelta;
+ predictedPoint.screen.velocity = predictedPoint.screen.displacement / frameDelta;
+
+ // TODO: Experimental - not used at run time. Left in code for reference only.
+ if( TEST_TUNE_ENABLE_OVERSHOOT_PROTECTION )
+ {
+ // Overshoot protection: if the local velocity changed sign on an axis since
+ // the last prediction, pin that axis to the last predicted position and
+ // reset the smoothing history.
+ if( !noPreviousData )
+ {
+ bool xSignFlipped = ( mLastPredictedPoint.local.velocity.x > Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.x < Math::MACHINE_EPSILON_0 )
+ || ( mLastPredictedPoint.local.velocity.x < Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.x > Math::MACHINE_EPSILON_0 );
+ if( xSignFlipped )
+ {
+ predictedPoint.local.position.x = mLastPredictedPoint.local.position.x;
+ predictedPoint.screen.position.x = mLastPredictedPoint.screen.position.x;
+ mPredictionHistory.clear();
+ }
+ bool ySignFlipped = ( mLastPredictedPoint.local.velocity.y > Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.y < Math::MACHINE_EPSILON_0 )
+ || ( mLastPredictedPoint.local.velocity.y < Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.y > Math::MACHINE_EPSILON_0 );
+ if( ySignFlipped )
+ {
+ predictedPoint.local.position.y = mLastPredictedPoint.local.position.y;
+ predictedPoint.screen.position.y = mLastPredictedPoint.screen.position.y;
+ mPredictionHistory.clear();
+ }
+ }
+ }
+
+ // Stamp the result and remember it for next frame's displacement/overshoot checks.
+ predictedPoint.time = currentFrameTime;
+ mLastPredictedPoint = predictedPoint;
+}
+
+// TODO: This needs a better name! It is called this instead of prediction mode 2 because:
+// 1) It is the entire workflow, not just prediction.
+// 2) To make it less confusing as there is a function that does prediction alone called PerformPredictionMode2.
+// Ultimately we need to combine the old and new code modularly so there is one code path that can optionally run different functions based on configuration.
+// At the moment, the differences between the inputs & outputs of these different functions prevent that, but this can be resolved.
+// Runs one frame of the "new" pan workflow: read this frame's gesture events,
+// rate-convert them, interpolate/predict/smooth, then publish the result via the
+// gesture properties. Returns true if the properties were updated this frame.
+// NOTE(review): the function body continues past the end of this chunk.
+bool PanGesture::NewAlgorithm( unsigned int lastVSyncTime, unsigned int nextVSyncTime )
+{
+ if( !mInGesture )
+ {
+ // clear current pan history
+ mPanHistory.clear();
+ mPredictionHistory.clear();
+ }
+
+ /*#########################################################################################
+ #### Read in all gestures received this frame first (holding a lock for a short time)
+ #########################################################################################*/
+
+ unsigned int eventsThisFrame = ReadFrameEvents();
+
+ /*#########################################################################################
+ #### Perform input rate-conversion on all gestures received this frame.
+ #### This also populates the pan history.
+ #########################################################################################*/
+
+ bool justStarted = false;
+ bool justFinished = false;
+ PanInfo rateConvertedGesture;
+ if( !InputRateConversion( rateConvertedGesture, eventsThisFrame, nextVSyncTime, lastVSyncTime, justStarted, justFinished ) )
+ {
+ // There's nothing we can do with the input, exit.
+ return false;
+ }
+
+ /*#########################################################################################
+ #### If we are in gesture, Get first interpolated point with: target time = current time
+ #########################################################################################*/
+
+ bool performUpdate = false;
+ RelativeVectors currentAcceleration;
+ currentAcceleration.local = mLastInitialAcceleration.local;
+ currentAcceleration.screen = mLastInitialAcceleration.screen;
+
+ if( mInGesture || justStarted )
+ {
+ // Get first interpolated point.
+ // TODO: Erase time should be maximum of both interpolated point ranges in past.
+ PanInfo targetPoint;
+ float outputTimeGranularity( GetDivisibleTimeDifference( nextVSyncTime, lastVSyncTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+ bool pointGenerated = InterpolatePoint( mPanHistory, nextVSyncTime, nextVSyncTime, mInterpolationTimeRange,
+ targetPoint, currentAcceleration, static_cast<int>( outputTimeGranularity ), true ); // truncated
+ if( pointGenerated )
+ {
+ // A point was interpolated from history: remember its acceleration and flag the update.
+ mLastInitialAcceleration.local = currentAcceleration.local;
+ mLastInitialAcceleration.screen = currentAcceleration.screen;
+ performUpdate = true;
+ }
+ else
+ {
+ // No point could be interpolated: fall back to the rate-converted gesture
+ // and the previously-stored acceleration (performUpdate stays false).
+ targetPoint = rateConvertedGesture;
+ currentAcceleration.local = mLastInitialAcceleration.local;
+ currentAcceleration.screen = mLastInitialAcceleration.screen;
+ // TODO: Potentially do something to substitute lack of generated point (and perform update).
+ }
+
+ /*#########################################################################################
+ #### Limit the change of acceleration of the first interpolated point since last time
+ #########################################################################################*/
+
+ if( !justStarted )
+ {
+ LimitAccelerationChange( currentAcceleration, mLastAcceleration, ACCELERATION_CAP );
+ }
+ mLastAcceleration.local = currentAcceleration.local;
+ mLastAcceleration.screen = currentAcceleration.screen;
+
+ /*#########################################################################################
+ #### Get second interpolated point, and blend the resultant velocity and acceleration (optional)
+ #########################################################################################*/
+
+ PanInfo outPoint;
+ RelativeVectors interpolatedAcceleration;
+ if( mTwoPointPredictionEnabled )
+ {
+ // Get second interpolated point with target time = current time - past interpolate time.
+ unsigned int pastInterpolateTime = nextVSyncTime - mTwoPointPastInterpolateTime;
+ // NOTE(review): these declarations shadow the identically-named outPoint and
+ // interpolatedAcceleration declared just above this if-block; the outer pair is
+ // never used. Consider removing one pair to avoid confusion.
+ PanInfo outPoint;
+ RelativeVectors interpolatedAcceleration;
+ interpolatedAcceleration.local = mLastInterpolatedAcceleration.local;
+ interpolatedAcceleration.screen = mLastInterpolatedAcceleration.screen;
+ if( !InterpolatePoint( mPanHistory, nextVSyncTime, pastInterpolateTime, mTwoPointPastInterpolateTime,
+ outPoint, interpolatedAcceleration, static_cast<int>( outputTimeGranularity ), false ) ) // truncated
+ {
+ // Interpolation failed: reuse the first point (on gesture start) or the
+ // second interpolated point from last frame.
+ if( justStarted )
+ {
+ outPoint = targetPoint;
+ }
+ else
+ {
+ outPoint = mLastSecondInterpolatedPoint;
+ }
+ }
+ mLastInterpolatedAcceleration.local = interpolatedAcceleration.local;
+ mLastInterpolatedAcceleration.screen = interpolatedAcceleration.screen;
+ mLastSecondInterpolatedPoint = outPoint;
+
+ // Combine the first interpolated point and the second interpolated point.
+ // by mixing them with the configured amount. This is done for acceleration and velocity.
+ // It could be optionally done for position too, but this typically is worse as it means we have to predict further ahead.
+ float currentVelocityMultiplier( 1.0f - mTwoPointVelocityBias );
+ float lastVelocityMultiplier( mTwoPointVelocityBias );
+ targetPoint.local.velocity = ( outPoint.local.velocity * lastVelocityMultiplier ) + ( targetPoint.local.velocity * currentVelocityMultiplier );
+ targetPoint.screen.velocity = ( outPoint.screen.velocity * lastVelocityMultiplier ) + ( targetPoint.screen.velocity * currentVelocityMultiplier );
+ float currentAccelerationMultiplier( 1.0f - mTwoPointAccelerationBias );
+ float lastAccelerationMultiplier( mTwoPointAccelerationBias );
+ currentAcceleration.local = ( interpolatedAcceleration.local * lastAccelerationMultiplier ) + ( currentAcceleration.local * currentAccelerationMultiplier );
+ currentAcceleration.screen = ( interpolatedAcceleration.screen * lastAccelerationMultiplier ) + ( currentAcceleration.screen * currentAccelerationMultiplier );
+ }
+
+ /*#########################################################################################
+ #### Perform prediction
+ #########################################################################################*/
+
+ PanInfo predictedPoint;
+ PredictionMode2( targetPoint, currentAcceleration, predictedPoint, nextVSyncTime, lastVSyncTime, justStarted );
+ targetPoint = predictedPoint;
+
+ /*#########################################################################################
+ #### Smoothing
+ #########################################################################################*/
+
+ // If we are using multi-tap smoothing, keep a history of predicted results.
+ if( mSmoothingMode == SMOOTHING_MULTI_TAP )
+ {
+ mPredictionHistory.push_back( targetPoint );
+ }
+
+ // Smoothing needs a previous frame to smooth against, so it is skipped on gesture start.
+ if( !justStarted )
+ {
+ float outputTimeGranularity( GetDivisibleTimeDifference( nextVSyncTime, lastVSyncTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+ if( mSmoothingMode == SMOOTHING_MULTI_TAP )
+ {
+ // Perform Multi-tap Smoothing.
+ RelativeVectors blank;
+ InterpolatePoint( mPredictionHistory, nextVSyncTime, nextVSyncTime, mMultiTapSmoothingRange,
+ targetPoint, blank, static_cast<int>( outputTimeGranularity ), true ); // truncated
+ }
+ else
+ {
+ // Perform Single-tap Smoothing.
+ if( !mSmoothingAmountOverridden )
+ {
+ // If the smoothing amount has not been modified, default to the correct amount for this algorithm.
+ mSmoothingAmount = DEFAULT_SMOOTHING_AMOUNT[1];
+ }
+ BlendPoints( targetPoint, mLastGesture, mSmoothingAmount );
+ }
+
+ /*#########################################################################################
+ #### Finalize other point data (from position)
+ #########################################################################################*/
+
+ targetPoint.local.displacement = targetPoint.local.position - mLastGesture.local.position;
+ targetPoint.local.velocity = targetPoint.local.displacement / outputTimeGranularity;
+ targetPoint.screen.displacement = targetPoint.screen.position - mLastGesture.screen.position;
+ targetPoint.screen.velocity = targetPoint.screen.displacement / outputTimeGranularity;
+ }
+
+ /*#########################################################################################
+ #### Send out the new point, by setting the properties
+ #### (Constraints will automatically react to this)
+ #########################################################################################*/
+
+ if( performUpdate )
+ {
+ // NOTE(review): bitwise & on bool operands — equivalent result to && here since
+ // both operands are plain bools already evaluated, but && is more conventional.
+ mPanning.Set( mInGesture & !justFinished );
+ mScreenPosition.Set( targetPoint.screen.position );
+ mScreenDisplacement.Set( targetPoint.screen.displacement );
+ mScreenVelocity.Set( targetPoint.screen.velocity );
+ mLocalPosition.Set( targetPoint.local.position );
+ mLocalDisplacement.Set( targetPoint.local.displacement );
+ mLocalVelocity.Set( targetPoint.local.velocity );
+
+ mLastGesture = targetPoint;
+
+ if( mProfiling )
+ {
+ mProfiling->mAveragedData.push_back( PanGestureProfiling::Position( targetPoint.time, targetPoint.screen.position,
+ targetPoint.screen.displacement, targetPoint.screen.velocity, targetPoint.state ) );
+ }
+ }
+ }
+
+ mInGesture = mInGesture && !justFinished;
+
+ return performUpdate;