+// TODO: eventsThisFrame parameter can be removed if we use a smarter container.
+// Rate-converts the pan events read this frame into a single output gesture.
+// All events kept this frame are accumulated and then averaged into
+// rateConvertedGesture, which is appended to mPanHistory for later interpolation.
+// - A Gesture::Started event resets all per-gesture state (history, accelerations,
+//   cached points) and begins a new gesture.
+// - If no events arrived but a gesture is in progress, an event is estimated from
+//   last frame's data with degraded acceleration.
+// @param[out] rateConvertedGesture The averaged (or estimated) gesture for this frame.
+// @param[in]  eventsThisFrame      Number of events available in mReadGestures.
+// @param[in]  currentFrameTime     Timestamp assigned to this frame's output.
+// @param[in]  lastFrameTime        Timestamp of the previous frame (for time deltas).
+// @param[out] justStarted          Set true if a Started event was seen this frame.
+// @param[out] justFinished         Set true if a Finished/Cancelled event was seen.
+// @return false only when nothing useful can be produced (no events this frame and
+//         no in-progress gesture to estimate from); true otherwise.
+bool PanGesture::InputRateConversion( PanInfo& rateConvertedGesture, unsigned int eventsThisFrame,
+    unsigned int currentFrameTime, unsigned int lastFrameTime, bool& justStarted, bool& justFinished )
+{
+  // TODO: Lots of variables on the stack. Needs optimizing.
+  PanInfo readGesture;
+  unsigned int eventsKeptThisFrame = 0;
+
+  for( unsigned int readPosition = 0; readPosition < eventsThisFrame; ++readPosition )
+  {
+    // Copy the gesture first
+    readGesture = mReadGestures[ readPosition ];
+
+    if( mProfiling )
+    {
+      mProfiling->mRawData.push_back( PanGestureProfiling::Position( readGesture.time, readGesture.screen.position,
+          readGesture.screen.displacement, readGesture.screen.velocity, readGesture.state ) );
+    }
+
+    if( readGesture.state == Gesture::Started )
+    {
+      // Clear pan data.
+      mPanHistory.clear();
+      mPredictionHistory.clear();
+      mLastAcceleration.local = Vector2::ZERO;
+      mLastAcceleration.screen = Vector2::ZERO;
+      mLastInterpolatedAcceleration.local = Vector2::ZERO;
+      mLastInterpolatedAcceleration.screen = Vector2::ZERO;
+      mLastInitialAcceleration.local = Vector2::ZERO;
+      mLastInitialAcceleration.screen = Vector2::ZERO;
+      // Reset all cached gesture points to a default-constructed PanInfo.
+      PanInfo startInfo;
+      mLastGesture = startInfo;
+      mLastSecondInterpolatedPoint = startInfo;
+      mLastPredictedPoint = startInfo;
+      mLastFrameReadGesture = startInfo;
+      rateConvertedGesture = startInfo;
+      // Discard any accumulation from events earlier in this frame.
+      eventsKeptThisFrame = 0;
+      mNotAtTarget = false;
+      justFinished = false;
+      justStarted = true;
+      mInGesture = true;
+
+      if( !mPredictionAmountOverridden )
+      {
+        // If the prediction amount has not been modified, default to the correct amount for this algorithm.
+        mPredictionAmount = DEFAULT_PREDICTION_AMOUNT[1];
+      }
+      mCurrentPredictionAmount = mPredictionAmount;
+    }
+    else
+    {
+      justFinished |= ( readGesture.state == Gesture::Finished || readGesture.state == Gesture::Cancelled );
+    }
+
+    // Accumulate event data; it is averaged below when more than one event was kept.
+    rateConvertedGesture.screen.position += readGesture.screen.position;
+    rateConvertedGesture.local.position += readGesture.local.position;
+    rateConvertedGesture.screen.velocity += readGesture.screen.velocity;
+    rateConvertedGesture.local.velocity += readGesture.local.velocity;
+    rateConvertedGesture.screen.displacement += readGesture.screen.displacement;
+    rateConvertedGesture.local.displacement += readGesture.local.displacement;
+
+    ++eventsKeptThisFrame;
+  }
+
+  bool storeGesture = false;
+  if( eventsKeptThisFrame > 0 )
+  {
+    // Some events were read this frame.
+    if( eventsKeptThisFrame > 1 )
+    {
+      // Average the accumulated values over the number of events kept.
+      const float eventDivisor = static_cast<float>( eventsKeptThisFrame );
+      rateConvertedGesture.screen.position /= eventDivisor;
+      rateConvertedGesture.local.position /= eventDivisor;
+      rateConvertedGesture.screen.velocity /= eventDivisor;
+      rateConvertedGesture.local.velocity /= eventDivisor;
+      rateConvertedGesture.screen.displacement /= eventDivisor;
+      rateConvertedGesture.local.displacement /= eventDivisor;
+
+      // Remember the last raw event so later frames can catch up to it.
+      mTargetGesture = readGesture;
+      mNotAtTarget = true;
+    }
+    else
+    {
+      mNotAtTarget = false;
+    }
+
+    rateConvertedGesture.time = currentFrameTime;
+    storeGesture = true;
+  }
+  else
+  {
+    // We did not get any event this frame.
+    // If we just started (or aren't in a gesture), exit.
+    if( !mInGesture || justStarted )
+    {
+      // We cannot guess what the event could be as we have no other events to base the guess from.
+      return false;
+    }
+
+    // As we are currently in a gesture, we can estimate an event.
+    readGesture = mLastFrameReadGesture;
+    readGesture.time = currentFrameTime;
+
+    // Take the last event, halve the acceleration, and use that.
+    // NOTE(review): operator/= here permanently halves mLastAcceleration as a side
+    // effect, so acceleration keeps degrading across successive event-less frames —
+    // presumably intentional; confirm before changing to a plain division.
+    const float accelerationDegrade = 2.0f;
+    Vector2 degradedAccelerationLocal( mLastAcceleration.local /= accelerationDegrade );
+    Vector2 degradedAccelerationScreen( mLastAcceleration.screen /= accelerationDegrade );
+
+    float outputTimeGranularity( GetDivisibleTimeDifference( currentFrameTime, lastFrameTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+
+    // Integrate the degraded acceleration forward to synthesize this frame's event.
+    readGesture.local.velocity = degradedAccelerationLocal * outputTimeGranularity;
+    readGesture.local.displacement = readGesture.local.velocity * outputTimeGranularity;
+    readGesture.local.position = mLastFrameReadGesture.local.position + readGesture.local.displacement;
+    readGesture.screen.velocity = degradedAccelerationScreen * outputTimeGranularity;
+    readGesture.screen.displacement = readGesture.screen.velocity * outputTimeGranularity;
+    readGesture.screen.position = mLastFrameReadGesture.screen.position + readGesture.screen.displacement;
+
+    rateConvertedGesture = readGesture;
+    eventsKeptThisFrame = 1;
+    storeGesture = true;
+  }
+
+  if( eventsKeptThisFrame > 0 )
+  {
+    // Store last read gesture.
+    readGesture.time = currentFrameTime;
+    mLastFrameReadGesture = readGesture;
+
+    if( eventsKeptThisFrame > 2 )
+    {
+      DALI_LOG_WARNING( "Got events this frame:%d (more than 2 will compromise result)\n", eventsKeptThisFrame );
+    }
+  }
+
+  if( storeGesture )
+  {
+    // Store final converted result.
+    mPanHistory.push_back( rateConvertedGesture );
+  }
+  return true;
+}
+
+// Generates a single interpolated point from the given history by averaging the
+// samples that fall within `range` ms before `targetTime`, weighting each sample
+// by 1/timeDelta so that samples closer to the target time dominate.
+// Acceleration is similarly weight-averaged from the velocity deltas between
+// consecutive samples; when two or more samples were used it is then blended with
+// the incoming `acceleration` using ACCELERATION_SMOOTHING, otherwise it is derived
+// from the single point's velocity over `outputTimeGranularity`.
+// Samples older than targetTime - range are skipped, and erased from `history`
+// when `eraseUnused` is true.
+// NOTE(review): the `currentTime` parameter is not used in this function.
+// @return true if at least one sample contributed (outPoint/acceleration updated).
+bool PanGesture::InterpolatePoint( PanInfoHistory& history, unsigned int currentTime, unsigned int targetTime, unsigned int range,
+ PanInfo& outPoint, RelativeVectors& acceleration, int outputTimeGranularity, bool eraseUnused )
+{
+ unsigned int maxHistoryTime = targetTime - range;
+ unsigned int tapsUsed = 0;
+ outPoint.time = targetTime;
+ float divisor = 0.0f;
+ float accelerationDivisor = 0.0f;
+ PanInfoHistoryIter historyBegin = history.begin();
+ PanInfoHistoryIter lastIt = history.end();
+ bool pointGenerated = false;
+ bool havePreviousPoint = false;
+ RelativeVectors newAcceleration;
+
+ // Iterate through point history to perform interpolation.
+ for( PanInfoHistoryIter it = historyBegin; it != history.end(); )
+ {
+ unsigned int gestureTime = it->time;
+
+ if( gestureTime < maxHistoryTime )
+ {
+ // Too far in the past, discard.
+ // Clean history as we go (if requested).
+ if( eraseUnused )
+ {
+ it = history.erase( it );
+ }
+ else
+ {
+ ++it;
+ continue;
+ }
+ }
+ else
+ {
+ // NOTE(review): targetTime - gestureTime is an unsigned subtraction cast to int;
+ // safe while the delta fits in an int — confirm callers keep times close.
+ float timeDelta( static_cast<float>( abs( int( targetTime - gestureTime ) ) ) );
+ // Handle low time deltas.
+ if( timeDelta < 1.0f )
+ {
+ timeDelta = 1.0f;
+ }
+
+ // Accumulate each component weighted by the reciprocal of its age.
+ outPoint.local.position += it->local.position / timeDelta;
+ outPoint.screen.position += it->screen.position / timeDelta;
+ outPoint.local.velocity += it->local.velocity / timeDelta;
+ outPoint.screen.velocity += it->screen.velocity / timeDelta;
+ outPoint.local.displacement += it->local.displacement / timeDelta;
+ outPoint.screen.displacement += it->screen.displacement / timeDelta;
+
+ // Accumulate total weight for the weighted average performed below.
+ divisor += 1.0f / timeDelta;
+
+ // Acceleration requires a previous point.
+ if( havePreviousPoint )
+ {
+ // Time delta of input.
+ float timeDifference( GetDivisibleTimeDifference( it->time, lastIt->time, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+
+ // Acceleration = change in velocity over time, weighted like the other components.
+ newAcceleration.local += ( ( it->local.velocity - lastIt->local.velocity ) / timeDifference ) / timeDelta;
+ newAcceleration.screen += ( ( it->screen.velocity - lastIt->screen.velocity ) / timeDifference ) / timeDelta;
+
+ accelerationDivisor += 1.0f / timeDelta;
+ }
+ else
+ {
+ havePreviousPoint = true;
+ }
+
+ tapsUsed++;
+ lastIt = it;
+ ++it;
+ }
+ }
+
+ // Divide results by their respective divisors.
+ if( tapsUsed > 0 )
+ {
+ if( divisor > 0.0f )
+ {
+ outPoint.local.position /= divisor;
+ outPoint.screen.position /= divisor;
+ outPoint.local.velocity /= divisor;
+ outPoint.screen.velocity /= divisor;
+ outPoint.local.displacement /= divisor;
+ outPoint.screen.displacement /= divisor;
+ }
+
+ if( tapsUsed > 1 )
+ {
+ if( accelerationDivisor > 0.0f )
+ {
+ newAcceleration.local /= accelerationDivisor;
+ newAcceleration.screen /= accelerationDivisor;
+ }
+
+ // Blend the new acceleration with the caller's previous acceleration.
+ float accelerationSmoothing( ACCELERATION_SMOOTHING );
+ newAcceleration.local = ( acceleration.local * accelerationSmoothing ) + ( newAcceleration.local * ( 1.0f - accelerationSmoothing ) );
+ newAcceleration.screen = ( acceleration.screen * accelerationSmoothing ) + ( newAcceleration.screen * ( 1.0f - accelerationSmoothing ) );
+ }
+ else
+ {
+ // If we just started, last velocity was 0. So difference of zero to current velocity over time gives acceleration of the first point.
+ newAcceleration.local = outPoint.local.velocity / static_cast<float>( outputTimeGranularity );
+ newAcceleration.screen = outPoint.screen.velocity / static_cast<float>( outputTimeGranularity );
+ }
+ pointGenerated = true;
+ }
+
+ // Always write the (possibly zero) result back to the caller's acceleration.
+ acceleration.local = newAcceleration.local;
+ acceleration.screen = newAcceleration.screen;
+ return pointGenerated;
+}
+
+// Returns a time difference that is always safe to divide by.
+// When actual timestamps are in use, this is |timeA - timeB| clamped to at least
+// minimumDelta; otherwise the fixed overrideDifference is returned unchanged.
+float PanGesture::GetDivisibleTimeDifference( int timeA, int timeB, float minimumDelta, float overrideDifference )
+{
+  // Fixed-rate mode: ignore the real timestamps entirely.
+  if( !mUseActualTimes )
+  {
+    return overrideDifference;
+  }
+
+  // Actual-time mode: absolute delta, floored so callers never divide by ~zero.
+  float actualDifference = static_cast<float>( abs( timeA - timeB ) );
+  return ( actualDifference < minimumDelta ) ? minimumDelta : actualDifference;
+}
+
+// Caps how much currentAcceleration may differ from lastAcceleration, limiting the
+// per-axis change to +/- changeLimit for both the local and screen planes.
+// The vector Clamp overload is used (rather than the float parameter version) so the
+// capping vectors are only constructed once for both planes.
+void PanGesture::LimitAccelerationChange( RelativeVectors& currentAcceleration, RelativeVectors& lastAcceleration, float changeLimit )
+{
+  const Vector2 lowerCap( -changeLimit, -changeLimit );
+  const Vector2 upperCap( changeLimit, changeLimit );
+
+  // Work out how far acceleration moved since last time, then clamp that delta.
+  Vector2 deltaLocal( currentAcceleration.local - lastAcceleration.local );
+  Vector2 deltaScreen( currentAcceleration.screen - lastAcceleration.screen );
+  deltaLocal.Clamp( lowerCap, upperCap );
+  deltaScreen.Clamp( lowerCap, upperCap );
+
+  // Rebuild the current acceleration as last acceleration plus the capped delta.
+  currentAcceleration.local = lastAcceleration.local + deltaLocal;
+  currentAcceleration.screen = lastAcceleration.screen + deltaScreen;
+}
+
+// Predicts a point mCurrentPredictionAmount ms ahead of startPoint.
+// Two prediction methods are selected by mScalarOnlyPredictionEnabled:
+// - scalar: position + velocity * scaled-amount (no acceleration term);
+// - integration: position + v*t + 0.5*a*t^2 using accelerationToUse.
+// Displacement and velocity of the result are then derived from the predicted
+// position (relative to startPoint on the first frame, otherwise relative to the
+// previous prediction). The result is written to predictedPoint and cached in
+// mLastPredictedPoint.
+// @param[in]  noPreviousData true on the first frame of a gesture (no cached prediction).
+void PanGesture::PredictionMode2( PanInfo& startPoint, RelativeVectors& accelerationToUse,
+ PanInfo& predictedPoint, unsigned int currentFrameTime, unsigned int previousFrameTime, bool noPreviousData )
+{
+ // Do the prediction (based on mode).
+ if( mScalarOnlyPredictionEnabled )
+ {
+ // We are doing scalar based prediction.
+ // This divisor is to help tuning by giving the scalar only result
+ // a similar prediction amount to the integrated result.
+ float scalarVelocityMultiplier = static_cast<float>( mCurrentPredictionAmount ) / 1.364f;
+ predictedPoint.local.position = startPoint.local.position + ( startPoint.local.velocity * scalarVelocityMultiplier );
+ predictedPoint.screen.position = startPoint.screen.position + ( startPoint.screen.velocity * scalarVelocityMultiplier );
+ }
+ else
+ {
+ // We are doing integration based prediction.
+ float predictionDelta = static_cast<float>( mCurrentPredictionAmount );
+
+ // Kinematic integration: p + v*t + 0.5*a*t^2.
+ predictedPoint.local.position = startPoint.local.position + ( startPoint.local.velocity * predictionDelta ) +
+ ( accelerationToUse.local * ( predictionDelta * predictionDelta * 0.5f ) );
+ predictedPoint.screen.position = startPoint.screen.position + ( startPoint.screen.velocity * predictionDelta ) +
+ ( accelerationToUse.screen * ( predictionDelta * predictionDelta * 0.5f ) );
+ }
+
+ // Calculate remaining gesture data from the result.
+ float timeDifference( GetDivisibleTimeDifference( currentFrameTime, previousFrameTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+ if( noPreviousData )
+ {
+ // First frame: measure displacement from the unpredicted start point.
+ predictedPoint.local.displacement = predictedPoint.local.position - startPoint.local.position;
+ predictedPoint.screen.displacement = predictedPoint.screen.position - startPoint.screen.position;
+ }
+ else
+ {
+ // Subsequent frames: measure displacement from the previous prediction.
+ predictedPoint.local.displacement = predictedPoint.local.position - mLastPredictedPoint.local.position;
+ predictedPoint.screen.displacement = predictedPoint.screen.position - mLastPredictedPoint.screen.position;
+ }
+ predictedPoint.local.velocity = predictedPoint.local.displacement / timeDifference;
+ predictedPoint.screen.velocity = predictedPoint.screen.displacement / timeDifference;
+
+ // TODO: Experimental - not used at run time. Left in code for reference only.
+ if( TEST_TUNE_ENABLE_OVERSHOOT_PROTECTION )
+ {
+ // Overshoot protection
+ if( !noPreviousData )
+ {
+ // A sign change in predicted velocity means the prediction overshot and reversed;
+ // hold the previous position and restart the prediction history (per axis).
+ if( ( mLastPredictedPoint.local.velocity.x > Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.x < Math::MACHINE_EPSILON_0 )
+ || ( mLastPredictedPoint.local.velocity.x < Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.x > Math::MACHINE_EPSILON_0 ) )
+ {
+ predictedPoint.local.position.x = mLastPredictedPoint.local.position.x;
+ predictedPoint.screen.position.x = mLastPredictedPoint.screen.position.x;
+ mPredictionHistory.clear();
+ }
+ if( ( mLastPredictedPoint.local.velocity.y > Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.y < Math::MACHINE_EPSILON_0 )
+ || ( mLastPredictedPoint.local.velocity.y < Math::MACHINE_EPSILON_0 && predictedPoint.local.velocity.y > Math::MACHINE_EPSILON_0 ) )
+ {
+ predictedPoint.local.position.y = mLastPredictedPoint.local.position.y;
+ predictedPoint.screen.position.y = mLastPredictedPoint.screen.position.y;
+ mPredictionHistory.clear();
+ }
+ }
+ }
+
+ predictedPoint.time = currentFrameTime;
+ mLastPredictedPoint = predictedPoint;
+}
+
+// TODO: This needs a better name! It is called this instead of prediction mode 2 because:
+// 1) It is the entire workflow, not just prediction.
+// 2) To make it less confusing as there is a function that does prediction alone called PerformPredictionMode2.
+// Ultimately we need to combine the old and new code modularly so there is one code path that can optionally run different functions based on configuration.
+// At the moment, the differences between the inputs & outputs of these different functions prevent that, but this can be resolved.
+// Runs the complete per-frame workflow: read & rate-convert input, interpolate a
+// target point (optionally blended with a second, past interpolated point),
+// predict ahead, smooth, and finally write the result to the properties.
+// @return true if the properties were updated this frame.
+bool PanGesture::NewAlgorithm( unsigned int lastVSyncTime, unsigned int nextVSyncTime )
+{
+  if( !mInGesture )
+  {
+    // clear current pan history
+    mPanHistory.clear();
+    mPredictionHistory.clear();
+  }
+
+  /*#########################################################################################
+  #### Read in all gestures received this frame first (holding a lock for a short time)
+  #########################################################################################*/
+
+  unsigned int eventsThisFrame = ReadFrameEvents();
+
+  /*#########################################################################################
+  #### Perform input rate-conversion on all gestures received this frame.
+  #### This also populates the pan history.
+  #########################################################################################*/
+
+  bool justStarted = false;
+  bool justFinished = false;
+  PanInfo rateConvertedGesture;
+  if( !InputRateConversion( rateConvertedGesture, eventsThisFrame, nextVSyncTime, lastVSyncTime, justStarted, justFinished ) )
+  {
+    // There's nothing we can do with the input, exit.
+    return false;
+  }
+
+  /*#########################################################################################
+  #### If we are in gesture, Get first interpolated point with: target time = current time
+  #########################################################################################*/
+
+  bool performUpdate = false;
+  RelativeVectors currentAcceleration;
+  currentAcceleration.local = mLastInitialAcceleration.local;
+  currentAcceleration.screen = mLastInitialAcceleration.screen;
+
+  if( mInGesture || justStarted )
+  {
+    // Get first interpolated point.
+    // TODO: Erase time should be maximum of both interpolated point ranges in past.
+    PanInfo targetPoint;
+    float outputTimeGranularity( GetDivisibleTimeDifference( nextVSyncTime, lastVSyncTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+    bool pointGenerated = InterpolatePoint( mPanHistory, nextVSyncTime, nextVSyncTime, mInterpolationTimeRange,
+        targetPoint, currentAcceleration, static_cast<int>( outputTimeGranularity ), true ); // truncated
+    if( pointGenerated )
+    {
+      mLastInitialAcceleration.local = currentAcceleration.local;
+      mLastInitialAcceleration.screen = currentAcceleration.screen;
+      performUpdate = true;
+    }
+    else
+    {
+      // No history available to interpolate from; fall back to the raw converted input.
+      targetPoint = rateConvertedGesture;
+      currentAcceleration.local = mLastInitialAcceleration.local;
+      currentAcceleration.screen = mLastInitialAcceleration.screen;
+      // TODO: Potentially do something to substitute lack of generated point (and perform update).
+    }
+
+    /*#########################################################################################
+    #### Limit the change of acceleration of the first interpolated point since last time
+    #########################################################################################*/
+
+    if( !justStarted )
+    {
+      LimitAccelerationChange( currentAcceleration, mLastAcceleration, ACCELERATION_CAP );
+    }
+    mLastAcceleration.local = currentAcceleration.local;
+    mLastAcceleration.screen = currentAcceleration.screen;
+
+    /*#########################################################################################
+    #### Get second interpolated point, and blend the resultant velocity and acceleration (optional)
+    #########################################################################################*/
+
+    if( mTwoPointPredictionEnabled )
+    {
+      // Get second interpolated point with target time = current time - past interpolate time.
+      unsigned int pastInterpolateTime = nextVSyncTime - mTwoPointPastInterpolateTime;
+      PanInfo outPoint;
+      RelativeVectors interpolatedAcceleration;
+      interpolatedAcceleration.local = mLastInterpolatedAcceleration.local;
+      interpolatedAcceleration.screen = mLastInterpolatedAcceleration.screen;
+      if( !InterpolatePoint( mPanHistory, nextVSyncTime, pastInterpolateTime, mTwoPointPastInterpolateTime,
+          outPoint, interpolatedAcceleration, static_cast<int>( outputTimeGranularity ), false ) ) // truncated
+      {
+        // Could not generate a past point; reuse the best substitute available.
+        if( justStarted )
+        {
+          outPoint = targetPoint;
+        }
+        else
+        {
+          outPoint = mLastSecondInterpolatedPoint;
+        }
+      }
+      mLastInterpolatedAcceleration.local = interpolatedAcceleration.local;
+      mLastInterpolatedAcceleration.screen = interpolatedAcceleration.screen;
+      mLastSecondInterpolatedPoint = outPoint;
+
+      // Combine the first interpolated point and the second interpolated point.
+      // by mixing them with the configured amount. This is done for acceleration and velocity.
+      // It could be optionally done for position too, but this typically is worse as it means we have to predict further ahead.
+      float currentVelocityMultiplier( 1.0f - mTwoPointVelocityBias );
+      float lastVelocityMultiplier( mTwoPointVelocityBias );
+      targetPoint.local.velocity = ( outPoint.local.velocity * lastVelocityMultiplier ) + ( targetPoint.local.velocity * currentVelocityMultiplier );
+      targetPoint.screen.velocity = ( outPoint.screen.velocity * lastVelocityMultiplier ) + ( targetPoint.screen.velocity * currentVelocityMultiplier );
+      float currentAccelerationMultiplier( 1.0f - mTwoPointAccelerationBias );
+      float lastAccelerationMultiplier( mTwoPointAccelerationBias );
+      currentAcceleration.local = ( interpolatedAcceleration.local * lastAccelerationMultiplier ) + ( currentAcceleration.local * currentAccelerationMultiplier );
+      currentAcceleration.screen = ( interpolatedAcceleration.screen * lastAccelerationMultiplier ) + ( currentAcceleration.screen * currentAccelerationMultiplier );
+    }
+
+    /*#########################################################################################
+    #### Perform prediction
+    #########################################################################################*/
+
+    PanInfo predictedPoint;
+    PredictionMode2( targetPoint, currentAcceleration, predictedPoint, nextVSyncTime, lastVSyncTime, justStarted );
+    targetPoint = predictedPoint;
+
+    /*#########################################################################################
+    #### Smoothing
+    #########################################################################################*/
+
+    // If we are using multi-tap smoothing, keep a history of predicted results.
+    if( mSmoothingMode == SMOOTHING_MULTI_TAP )
+    {
+      mPredictionHistory.push_back( targetPoint );
+    }
+
+    if( !justStarted )
+    {
+      float outputTimeGranularity( GetDivisibleTimeDifference( nextVSyncTime, lastVSyncTime, 1.0f, OUTPUT_TIME_DIFFERENCE ) );
+      if( mSmoothingMode == SMOOTHING_MULTI_TAP )
+      {
+        // Perform Multi-tap Smoothing.
+        RelativeVectors blank;
+        InterpolatePoint( mPredictionHistory, nextVSyncTime, nextVSyncTime, mMultiTapSmoothingRange,
+            targetPoint, blank, static_cast<int>( outputTimeGranularity ), true ); // truncated
+      }
+      else
+      {
+        // Perform Single-tap Smoothing.
+        if( !mSmoothingAmountOverridden )
+        {
+          // If the smoothing amount has not been modified, default to the correct amount for this algorithm.
+          mSmoothingAmount = DEFAULT_SMOOTHING_AMOUNT[1];
+        }
+        BlendPoints( targetPoint, mLastGesture, mSmoothingAmount );
+      }
+
+      /*#########################################################################################
+      #### Finalize other point data (from position)
+      #########################################################################################*/
+
+      targetPoint.local.displacement = targetPoint.local.position - mLastGesture.local.position;
+      targetPoint.local.velocity = targetPoint.local.displacement / outputTimeGranularity;
+      targetPoint.screen.displacement = targetPoint.screen.position - mLastGesture.screen.position;
+      targetPoint.screen.velocity = targetPoint.screen.displacement / outputTimeGranularity;
+    }
+
+    /*#########################################################################################
+    #### Send out the new point, by setting the properties
+    #### (Constraints will automatically react to this)
+    #########################################################################################*/
+
+    if( performUpdate )
+    {
+      // Logical AND (both operands are booleans): still panning only if not just finished.
+      mPanning.Set( mInGesture && !justFinished );
+      mScreenPosition.Set( targetPoint.screen.position );
+      mScreenDisplacement.Set( targetPoint.screen.displacement );
+      mScreenVelocity.Set( targetPoint.screen.velocity );
+      mLocalPosition.Set( targetPoint.local.position );
+      mLocalDisplacement.Set( targetPoint.local.displacement );
+      mLocalVelocity.Set( targetPoint.local.velocity );
+
+      mLastGesture = targetPoint;
+
+      if( mProfiling )
+      {
+        mProfiling->mAveragedData.push_back( PanGestureProfiling::Position( targetPoint.time, targetPoint.screen.position,
+            targetPoint.screen.displacement, targetPoint.screen.velocity, targetPoint.state ) );
+      }
+    }
+  }
+
+  mInGesture = mInGesture && !justFinished;
+
+  return performUpdate;
+}
+
+