"baseColorTexture" : {
"index" : 0
},
- "metallicRoughnessTexture" : {
- "index" : 1
- },
"baseColorFactor": [ 1.000, 0.766, 0.336, 1.0 ],
"metallicFactor": 1.0,
"roughnessFactor": 0.0
DALI_TEST_EQUAL(1u, ctx.scene.GetRoots().size());
DALI_TEST_EQUAL(6u, ctx.scene.GetNodeCount());
- DALI_TEST_EQUAL(0u, ctx.resources.mEnvironmentMaps.size());
+ // Default envmap is used
+ DALI_TEST_EQUAL(1u, ctx.resources.mEnvironmentMaps.size());
auto& materials = ctx.resources.mMaterials;
DALI_TEST_EQUAL(2u, materials.size());
const MaterialDefinition materialGroundTruth[]{
- {MaterialDefinition::ALBEDO | MaterialDefinition::METALLIC | MaterialDefinition::ROUGHNESS |
- MaterialDefinition::EMISSIVE | MaterialDefinition::OCCLUSION |
- MaterialDefinition::NORMAL | MaterialDefinition::TRANSPARENCY | MaterialDefinition::GLTF_CHANNELS |
+ {MaterialDefinition::ALBEDO | MaterialDefinition::EMISSIVE | MaterialDefinition::OCCLUSION |
+ MaterialDefinition::NORMAL | MaterialDefinition::TRANSPARENCY |
(0x80 << MaterialDefinition::ALPHA_CUTOFF_SHIFT),
0,
- Vector4(1.f, .766f, .336f, 1.f),
- Vector3(0.2, 0.1, 0.0),
+ Color::WHITE,
1.f,
0.f,
+ Vector4(1.000, 0.766, 0.336, 1.0),
+ 1.f,
1.f,
+ Vector3(0.2, 0.1, 0.0),
+ true,
+ false,
+ true,
{
{MaterialDefinition::ALBEDO,
{"AnimatedCube_BaseColor.png",
SamplerFlags::Encode(FilterMode::LINEAR_MIPMAP_LINEAR, FilterMode::LINEAR, WrapMode::CLAMP_TO_EDGE, WrapMode::REPEAT)}},
- {MaterialDefinition::METALLIC | MaterialDefinition::ROUGHNESS | MaterialDefinition::GLTF_CHANNELS,
- {"AnimatedCube_MetallicRoughness.png",
- SamplerFlags::Encode(FilterMode::NEAREST_MIPMAP_LINEAR, FilterMode::NEAREST, WrapMode::CLAMP_TO_EDGE, WrapMode::MIRRORED_REPEAT)}},
{MaterialDefinition::NORMAL,
{"AnimatedCube_BaseColor.png",
SamplerFlags::Encode(FilterMode::LINEAR_MIPMAP_LINEAR, FilterMode::LINEAR, WrapMode::CLAMP_TO_EDGE, WrapMode::REPEAT)}},
MaterialDefinition::EMISSIVE | MaterialDefinition::OCCLUSION |
MaterialDefinition::NORMAL | MaterialDefinition::GLTF_CHANNELS,
0,
- Vector4(1.f, .766f, .336f, 1.f),
- Vector3(0.2, 0.1, 0.0),
+ Color::WHITE,
1.f,
0.f,
+ Vector4(1.000, 0.766, 0.336, 1.0),
1.f,
+ 1.f,
+ Vector3(0.2, 0.1, 0.0),
+ true,
+ true,
+ true,
{
{MaterialDefinition::ALBEDO,
{"AnimatedCube_BaseColor.png",
DALI_TEST_EQUAL(md.mColor, m.mColor);
DALI_TEST_EQUAL(md.mMetallic, m.mMetallic);
DALI_TEST_EQUAL(md.mRoughness, m.mRoughness);
+ DALI_TEST_EQUAL(md.mBaseColorFactor, m.mBaseColorFactor);
+ DALI_TEST_EQUAL(md.mNormalScale, m.mNormalScale);
+ DALI_TEST_EQUAL(md.mOcclusionStrength, m.mOcclusionStrength);
+ DALI_TEST_EQUAL(md.mEmissiveFactor, m.mEmissiveFactor);
+ DALI_TEST_EQUAL(md.mNeedAlbedoTexture, m.mNeedAlbedoTexture);
+ DALI_TEST_EQUAL(md.mNeedMetallicRoughnessTexture, m.mNeedMetallicRoughnessTexture);
+ DALI_TEST_EQUAL(md.mNeedNormalTexture, m.mNeedNormalTexture);
DALI_TEST_EQUAL(md.mTextureStages.size(), m.mTextureStages.size());
auto iTexture = md.mTextureStages.begin();
Accessor{Blob{0, 0}, {}},
Accessor{Blob{0, 0}, {}},
Accessor{Blob{0, 0}, {}},
+ Accessor{Blob{0, 0}, {}},
},
{
0,
Accessor{Blob{0, 0}, {}},
Accessor{Blob{0, 0}, {}},
Accessor{Blob{0, 0}, {}},
+ Accessor{Blob{0, 0}, {}},
},
};
&MeshDefinition::mPositions,
&MeshDefinition::mNormals,
&MeshDefinition::mTexCoords,
+ &MeshDefinition::mColors,
&MeshDefinition::mTangents,
&MeshDefinition::mJoints0,
&MeshDefinition::mWeights0})
"MorphPrimitivesTest",
"MRendererTest",
"SimpleSparseAccessor",
+ "AnimatedCube",
})
{
Context ctx;
ShaderDefinitionFactory sdf;
sdf.SetResources(ctx.resources);
auto& resources = ctx.resources;
- resources.mEnvironmentMaps.push_back({});
LoadGltfScene(TEST_RESOURCE_DIR "/MRendererTest.gltf", sdf, ctx.loadResult);
}
DALI_TEST_EQUAL(root.GetChildCount(), 1u);
- DALI_TEST_EQUAL(root.GetChildAt(0).GetProperty(Actor::Property::NAME).Get<std::string>(), "RootNode");
- DALI_TEST_EQUAL(root.GetChildAt(0).GetProperty(Actor::Property::SCALE).Get<Vector3>(), Vector3(1.0f, 1.0f, 1.0f));
+ Actor child = root.GetChildAt(0);
+
+ DALI_TEST_EQUAL(child.GetProperty(Actor::Property::NAME).Get<std::string>(), "RootNode");
+ DALI_TEST_EQUAL(child.GetProperty(Actor::Property::SCALE).Get<Vector3>(), Vector3(1.0f, 1.0f, 1.0f));
+ DALI_TEST_EQUAL(child.GetRendererCount(), 1u);
+ DALI_TEST_EQUAL(child.GetRendererAt(0).GetTextures().GetTextureCount(), 4u);
END_TEST;
}
Permutation permutations[]{
{
[](ShaderParameters& p) {},
- {},
+ {"THREE_TEX"},
RendererState::DEPTH_TEST | RendererState::DEPTH_WRITE | RendererState::CULL_BACK,
},
{
for(auto& ps : permSets)
{
- printf("%ld\n", &ps - permSets);
-
auto modelNode = new ModelNode();
modelNode->mMeshIdx = 0;
modelNode->mMaterialIdx = 0;
DALI_TEST_EQUAL(shaderDef.mRendererState, rendererState);
uint32_t definesUnmatched = shaderDef.mDefines.size();
- for(auto& d : shaderDef.mDefines)
+ for(auto& define : shaderDef.mDefines)
{
- auto iFind = defines.find(d);
+ auto iFind = defines.find(define);
if(iFind != defines.end())
{
defines.erase(iFind);
}
else
{
- printf("mismatched: %s\n", d.c_str());
break;
}
}
DALI_TEST_CHECK(defines.empty());
DALI_TEST_EQUAL(0, definesUnmatched);
- printf("defines OK\n");
-
auto uMaxLOD = shaderDef.mUniforms["uMaxLOD"];
DALI_TEST_EQUAL(uMaxLOD.GetType(), Property::FLOAT);
END_TEST;
}
-
int UtcDaliAnimatedImageVisualSynchronousLoadingWithAlphaMask(void)
{
ToolkitTestApplication application;
ToolkitTestApplication application;
TestGlAbstraction& gl = application.GetGlAbstraction();
+ tet_infoline("Set cache size same as GIF frame, and try to load same image at another ImageView");
{
Property::Map propertyMap;
propertyMap.Insert(Visual::Property::TYPE, Visual::ANIMATED_IMAGE);
DALI_TEST_EQUALS(Test::WaitForEventThreadTrigger(2), true, TEST_LOCATION);
+ // Batch 2 frames. Now frame 0, 1 cached.
application.SendNotification();
application.Render(20);
DALI_TEST_EQUALS(Test::WaitForEventThreadTrigger(2), true, TEST_LOCATION);
+ // Frame 0 removed; afterwards, batch 2 frames. Now frames 1, 2, 3 cached.
application.SendNotification();
application.Render(20);
DALI_TEST_EQUALS(gl.GetLastGenTextureId(), 4, TEST_LOCATION);
+ Visual::Base visual2 = factory.CreateVisual(propertyMap);
+ DummyControl dummyControl2 = DummyControl::New(true);
+ Impl::DummyControl& dummyImpl2 = static_cast<Impl::DummyControl&>(dummyControl2.GetImplementation());
+ dummyImpl2.RegisterVisual(DummyControl::Property::TEST_VISUAL, visual2);
+ application.GetScene().Add(dummyControl2);
+
+ tet_infoline("Add new view with same url");
+
+ application.SendNotification();
+ application.Render();
+
+ // Note that we only re-load frame 0.
+ DALI_TEST_EQUALS(Test::WaitForEventThreadTrigger(1), true, TEST_LOCATION);
+
+ tet_infoline("Test that we don't try to re-load new image cause it cached");
+ DALI_TEST_EQUALS(Test::WaitForEventThreadTrigger(1, 1), false, TEST_LOCATION);
+
+ // Batch 2 frames. Now visual frame 1, 2, 3 cached and visual2 frame 0, 1 cached.
+ application.SendNotification();
+ application.Render(20);
+
+ DALI_TEST_EQUALS(gl.GetLastGenTextureId(), 5, TEST_LOCATION);
+
+ textureTrace.Reset();
+
+ tet_infoline("Load some many frames");
+
+ const int repeatCount = 10;
+ for(int repeat = 0; repeat < repeatCount; ++repeat)
+ {
+ Test::EmitGlobalTimerSignal();
+ application.SendNotification();
+ application.Render(2000);
+ }
+
+ DALI_TEST_EQUALS(textureTrace.FindMethod("GenTextures"), false, TEST_LOCATION); // A new texture should NOT be generated.
+ DALI_TEST_EQUALS(gl.GetLastGenTextureId(), 5, TEST_LOCATION);
+
+ textureTrace.Reset();
+
dummyControl.Unparent();
+ dummyControl2.Unparent();
}
tet_infoline("Test that removing the visual from stage deletes all textures");
application.SendNotification();
propertyMap.Insert(ImageVisual::Property::FRAME_DELAY, 20);
propertyMap.Insert(ImageVisual::Property::ALPHA_MASK_URL, TEST_MASK_IMAGE_FILE_NAME);
-
VisualFactory factory = VisualFactory::Get();
Visual::Base visual = factory.CreateVisual(propertyMap);
Property::Map imageMap;
imageMap[ImageVisual::Property::URL] = url.GetUrl();
+ imageMap[ImageVisual::Property::DESIRED_HEIGHT] = 600;
+ imageMap[ImageVisual::Property::DESIRED_WIDTH] = 600;
+ imageMap[ImageVisual::Property::ATLASING] = true;
+
+ // No atlasing with big image
+ ImageView imageView_bigdesired = ImageView::New();
+ imageView_bigdesired.SetProperty(ImageView::Property::IMAGE, imageMap);
+ imageView_bigdesired.SetProperty(Toolkit::Control::Property::PADDING, Extents(10u, 10u, 10u, 10u));
+
+ imageMap[ImageVisual::Property::DESIRED_HEIGHT] = 0;
+ imageMap[ImageVisual::Property::DESIRED_WIDTH] = 0;
+
+ // No atlasing with zero desired size
+ ImageView imageView_nodesired = ImageView::New();
+ imageView_nodesired.SetProperty(ImageView::Property::IMAGE, imageMap);
+ imageView_nodesired.SetProperty(Toolkit::Control::Property::PADDING, Extents(10u, 10u, 10u, 10u));
+
imageMap[ImageVisual::Property::DESIRED_HEIGHT] = 34;
imageMap[ImageVisual::Property::DESIRED_WIDTH] = 34;
- imageMap[ImageVisual::Property::ATLASING] = true;
ImageView imageView = ImageView::New();
imageView.SetProperty(ImageView::Property::IMAGE, imageMap);
// By default, Aysnc loading is used
// loading is not started if the actor is offScene
-
application.GetScene().Add(imageView);
+ application.GetScene().Add(imageView_bigdesired);
+ application.GetScene().Add(imageView_nodesired);
application.SendNotification();
application.Render(16);
+
+ // loading started, this waits for the loader thread for max 30 seconds
+ DALI_TEST_EQUALS(Test::WaitForEventThreadTrigger(1), true, TEST_LOCATION);
+
application.Render(16);
application.SendNotification();
// Sync loading is used
Property::Map syncLoadingMap;
syncLoadingMap["url"] = url.GetUrl();
+ syncLoadingMap["alphaMaskUrl"] = gImage_34_RGBA;
syncLoadingMap["desiredHeight"] = 34;
syncLoadingMap["desiredWidth"] = 34;
syncLoadingMap["synchronousLoading"] = true;
END_TEST;
}
-int UtcDaliKeyboardFocusManagerWithVisible(void)
-{
- ToolkitTestApplication application;
-
- tet_infoline(" UtcDaliKeyboardFocusManagerWithVisible");
-
- KeyboardFocusManager manager = KeyboardFocusManager::Get();
- DALI_TEST_CHECK(manager);
-
- // Create the first actor and add it to the stage
- Actor first = Actor::New();
- first.SetProperty(Actor::Property::KEYBOARD_FOCUSABLE, true);
- application.GetScene().Add(first);
-
- // Create the second actor and add it to the first actor.
- Actor second = Actor::New();
- second.SetProperty(Actor::Property::KEYBOARD_FOCUSABLE, true);
- first.Add(second);
-
- // Check that no actor is being focused yet.
- DALI_TEST_CHECK(manager.GetCurrentFocusActor() == Actor());
-
- // Check that the focus is set on the first actor
- DALI_TEST_CHECK(manager.SetCurrentFocusActor(first) == true);
- DALI_TEST_CHECK(manager.GetCurrentFocusActor() == first);
-
- // Set visible false.
- first.SetProperty(Actor::Property::VISIBLE, false);
-
- // Check that it will fail to set focus on the second actor as it's not focusable
- DALI_TEST_CHECK(manager.SetCurrentFocusActor(second) == false);
- DALI_TEST_CHECK(manager.GetCurrentFocusActor() == first);
-
- // Set visible true.
- first.SetProperty(Actor::Property::VISIBLE, true);
-
- // Check that the focus is set on the second actor
- DALI_TEST_CHECK(manager.SetCurrentFocusActor(second) == true);
- DALI_TEST_CHECK(manager.GetCurrentFocusActor() == second);
-
- END_TEST;
-}
-
int UtcDaliKeyboardFocusManagerFocusFinderRootActor(void)
{
ToolkitTestApplication application;
Dali::Toolkit::DevelKeyboardFocusManager::ResetFocusFinderRootActor(manager);
END_TEST;
-}
\ No newline at end of file
+}
#version 300 es
+// Original Code
+// https://github.com/KhronosGroup/glTF-Sample-Viewer/blob/glTF-WebGL-PBR/shaders/pbr-frag.glsl
+// Commit dc84b5e374fb3d23153d2248a338ef88173f9eb6
+//
+// This fragment shader defines a reference implementation for Physically Based Shading of
+// a microfacet surface material defined by a glTF model. For the DamagedHelmet.gltf and its Assets
+//
+// References:
+// [1] Real Shading in Unreal Engine 4
+// http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf
+// [2] Physically Based Shading at Disney
+// http://blog.selfshadow.com/publications/s2012-shading-course/burley/s2012_pbs_disney_brdf_notes_v3.pdf
+// [3] README.md - Environment Maps
+// https://github.com/KhronosGroup/glTF-Sample-Viewer/#environment-maps
+// [4] \"An Inexpensive BRDF Model for Physically based Rendering\" by Christophe Schlick
+// https://www.cs.virginia.edu/~jdl/bib/appearance/analytic%20models/schlick94b.pdf
+
#ifdef HIGHP
- precision highp float;
+precision highp float;
#else
- precision mediump float;
+precision mediump float;
#endif
#ifdef THREE_TEX
#ifdef GLTF_CHANNELS
-// https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#pbrmetallicroughnessmetallicroughnesstexture
#define METALLIC b
#define ROUGHNESS g
#else //GLTF_CHANNELS
#endif //GLTF_CHANNELS
#endif //THREE_TEX
-#ifdef THREE_TEX
- uniform sampler2D sAlbedoAlpha;
- uniform sampler2D sMetalRoughness;
- uniform sampler2D sNormal;
-
-#ifdef ALPHA_TEST
- uniform float uAlphaThreshold;
-#endif //ALPHA_TEST
+uniform lowp vec4 uColorFactor;
+uniform lowp float uMetallicFactor;
+uniform lowp float uRoughnessFactor;
-#else
- uniform sampler2D sAlbedoMetal;
- uniform sampler2D sNormalRoughness;
+#ifdef THREE_TEX
+#ifdef BASECOLOR_TEX
+uniform sampler2D sAlbedoAlpha;
+#endif // BASECOLOR_TEX
+#ifdef METALLIC_ROUGHNESS_TEX
+uniform sampler2D sMetalRoughness;
+#endif // METALLIC_ROUGHNESS_TEX
+#ifdef NORMAL_TEX
+uniform sampler2D sNormal;
+uniform float uNormalScale;
+#endif // NORMAL_TEX
+#else // THREE_TEX
+uniform sampler2D sAlbedoMetal;
+uniform sampler2D sNormalRoughness;
#endif
#ifdef OCCLUSION
- uniform sampler2D sOcclusion;
- uniform float uOcclusionStrength;
+uniform sampler2D sOcclusion;
+uniform float uOcclusionStrength;
#endif
#ifdef EMISSIVE
- uniform sampler2D sEmissive;
- uniform vec3 uEmissiveFactor;
+uniform sampler2D sEmissive;
+uniform vec3 uEmissiveFactor;
#endif
-uniform samplerCube sDiffuse;
-uniform samplerCube sSpecular;
-
-// Number of mip map levels in the texture
-uniform float uMaxLOD;
-
-// Transformation matrix of the cubemap texture
-uniform mat4 uCubeMatrix;
-
-uniform vec4 uColor;
-uniform float uMetallicFactor;
-uniform float uRoughnessFactor;
-
-//IBL Light intensity
+//// For IBL
+uniform samplerCube sDiffuseEnvSampler;
+uniform samplerCube sSpecularEnvSampler;
+uniform sampler2D sbrdfLUT;
uniform float uIblIntensity;
+// For Alpha Mode.
+uniform lowp float uOpaque;
+uniform lowp float uMask;
+uniform lowp float uAlphaThreshold;
+
// TODO: Multiple texture coordinate will be supported.
-in vec2 vUV;
-in vec3 vNormal;
-in vec3 vTangent;
-in vec3 vViewVec;
+in lowp vec2 vUV;
+in lowp mat3 vTBN;
+in lowp vec4 vColor;
+in highp vec3 vPositionToCamera;
out vec4 FragColor;
-// Functions for BRDF calculation come from
-// https://www.unrealengine.com/blog/physically-based-shading-on-mobile
-// Based on the paper by Dimitar Lazarov
-// http://blog.selfshadow.com/publications/s2013-shading-course/lazarov/s2013_pbs_black_ops_2_notes.pdf
-vec3 EnvBRDFApprox( vec3 SpecularColor, float Roughness, float NoV )
+struct PBRInfo
{
- const vec4 c0 = vec4( -1.0, -0.0275, -0.572, 0.022 );
- const vec4 c1 = vec4( 1.0, 0.0425, 1.04, -0.04 );
- vec4 r = Roughness * c0 + c1;
- float a004 = min( r.x * r.x, exp2( -9.28 * NoV ) ) * r.x + r.y;
- vec2 AB = vec2( -1.04, 1.04 ) * a004 + r.zw;
-
- return SpecularColor * AB.x + AB.y;
+ mediump float NdotL; // cos angle between normal and light direction
+ mediump float NdotV; // cos angle between normal and view direction
+ mediump float NdotH; // cos angle between normal and half vector
+ mediump float VdotH; // cos angle between view direction and half vector
+ mediump vec3 reflectance0; // full reflectance color (normal incidence angle)
+ mediump vec3 reflectance90; // reflectance color at grazing angle
+ lowp float alphaRoughness; // roughness mapped to a more linear change in the roughness (proposed by [2])
+};
+
+const float M_PI = 3.141592653589793;
+const float c_MinRoughness = 0.04;
+
+vec3 specularReflection(PBRInfo pbrInputs)
+{
+ return pbrInputs.reflectance0 + (pbrInputs.reflectance90 - pbrInputs.reflectance0) * pow(clamp(1.0 - pbrInputs.VdotH, 0.0, 1.0), 5.0);
}
-void main()
+float geometricOcclusion(PBRInfo pbrInputs)
{
- // We get information from the maps (albedo, normal map, roughness, metalness
- // I access the maps in the order they will be used
-#ifdef THREE_TEX
- vec4 albedoAlpha = texture(sAlbedoAlpha, vUV.st);
- float alpha = albedoAlpha.a;
-#ifdef ALPHA_TEST
- if (alpha <= uAlphaThreshold)
- {
- discard;
- }
-#endif //ALPHA_TEST
- vec3 albedoColor = albedoAlpha.rgb * uColor.rgb;
-
- vec4 metalRoughness = texture(sMetalRoughness, vUV.st);
- float metallic = metalRoughness.METALLIC * uMetallicFactor;
- float roughness = metalRoughness.ROUGHNESS * uRoughnessFactor;
-
- vec3 normalMap = texture(sNormal, vUV.st).rgb;
-#else //THREE_TEX
- vec4 albedoMetal = texture(sAlbedoMetal, vUV.st);
- vec3 albedoColor = albedoMetal.rgb * uColor.rgb;
- float metallic = albedoMetal.a * uMetallicFactor;
-
- vec4 normalRoughness = texture(sNormalRoughness, vUV.st);
- vec3 normalMap = normalRoughness.rgb;
- float roughness = normalRoughness.a * uRoughnessFactor;
-#endif
- //Normalize vectors
- vec3 normal = normalize(vNormal);
- vec3 tangent = normalize(vTangent);
-
- // NOTE: normal and tangent have to be orthogonal for the result of the cross()
- // product to be a unit vector. We might find that we need to normalize().
- vec3 bitangent = cross(normal, tangent);
-
- vec3 viewVec = normalize(vViewVec);
-
- // Create Inverse Local to world matrix
- mat3 vInvTBN = mat3(tangent, bitangent, normal);
-
- // Get normal map info in world space
- normalMap = normalize(normalMap - 0.5);
- vec3 newNormal = vInvTBN * normalMap.rgb;
+ mediump float NdotL = pbrInputs.NdotL;
+ mediump float NdotV = pbrInputs.NdotV;
+ lowp float r = pbrInputs.alphaRoughness;
- // Calculate normal dot view vector
- float NoV = max(dot(newNormal, -viewVec), 0.0);
-
- // Reflect vector
- vec3 reflectionVec = reflect(viewVec, newNormal);
-
- //transform it now to environment coordinates (used when the environment rotates)
- vec3 reflecCube = (uCubeMatrix * vec4( reflectionVec, 0.0 ) ).xyz;
- reflecCube = normalize( reflecCube );
-
- //transform it now to environment coordinates
- vec3 normalCube = ( uCubeMatrix * vec4( newNormal, 0.0 ) ).xyz;
- normalCube = normalize( normalCube );
-
- // Get irradiance from diffuse cubemap
- vec3 irradiance = texture( sDiffuse, normalCube ).rgb;
-
- // Access reflection color using roughness value
- float finalLod = mix( 0.0, uMaxLOD - 2.0, roughness);
- vec3 reflectionColor = textureLod(sSpecular, reflecCube, finalLod).rgb;
-
- // We are supposed to be using DielectricColor (0.04) of a plastic (almost everything)
- // http://blog.selfshadow.com/publications/s2014-shading-course/hoffman/s2014_pbs_physics_math_slides.pdf
- // however that seems to prevent achieving very dark tones (i.e. get dark gray blacks).
- vec3 DiffuseColor = albedoColor - albedoColor * metallic; // 1 mad
- vec3 SpecularColor = mix( vec3(0.04), albedoColor, metallic); // 2 mad
-
- // Calculate specular color using Magic Function (takes original roughness and normal dot view).
- vec3 specColor = reflectionColor.rgb * EnvBRDFApprox(SpecularColor, roughness, NoV );
+ lowp float attenuationL = 2.0 * NdotL / (NdotL + sqrt(r * r + (1.0 - r * r) * (NdotL * NdotL)));
+ lowp float attenuationV = 2.0 * NdotV / (NdotV + sqrt(r * r + (1.0 - r * r) * (NdotV * NdotV)));
+ return attenuationL * attenuationV;
+}
- // Multiply the result by albedo texture and do energy conservation
- vec3 diffuseColor = irradiance * DiffuseColor;
+float microfacetDistribution(PBRInfo pbrInputs)
+{
+ mediump float roughnessSq = pbrInputs.alphaRoughness * pbrInputs.alphaRoughness;
+ lowp float f = (pbrInputs.NdotH * roughnessSq - pbrInputs.NdotH) * pbrInputs.NdotH + 1.0;
+ return roughnessSq / (M_PI * f * f);
+}
- // Final color is the sum of the diffuse and specular term
- vec3 finalColor = diffuseColor + specColor;
+vec3 linear(vec3 color)
+{
+ return pow(color, vec3(2.2));
+}
- finalColor = sqrt( finalColor ) * uIblIntensity;
+void main()
+{
+ // Metallic and Roughness material properties are packed together
+ // In glTF, these factors can be specified by fixed scalar values
+ // or from a metallic-roughness map
+ // Roughness is stored in the 'g' channel, metallic is stored in the 'b' channel.
+ // This layout intentionally reserves the 'r' channel for (optional) occlusion map data
+ lowp float metallic = uMetallicFactor;
+ lowp float perceptualRoughness = uRoughnessFactor;
+ // If there isn't a normal texture, use the surface normal
+ mediump vec3 n = normalize(vTBN[2].xyz);
+#ifdef THREE_TEX
+ // The albedo may be defined from a base texture or a flat color
+#ifdef BASECOLOR_TEX
+ lowp vec4 baseColor = texture(sAlbedoAlpha, vUV);
+ baseColor = vec4(linear(baseColor.rgb), baseColor.w) * uColorFactor;
+#else // BASECOLOR_TEX
+ lowp vec4 baseColor = vColor * uColorFactor;
+#endif // BASECOLOR_TEX
+
+#ifdef METALLIC_ROUGHNESS_TEX
+ lowp vec4 metrou = texture(sMetalRoughness, vUV);
+ metallic = metrou.METALLIC * metallic;
+ perceptualRoughness = metrou.ROUGHNESS * perceptualRoughness;
+#endif // METALLIC_ROUGHNESS_TEX
+
+#ifdef NORMAL_TEX
+ n = texture(sNormal, vUV).rgb;
+ n = normalize(vTBN * ((2.0 * n - 1.0) * vec3(uNormalScale, uNormalScale, 1.0)));
+#endif // NORMAL_TEX
+#else // THREE_TEX
+ vec4 albedoMetal = texture(sAlbedoMetal, vUV);
+ lowp vec4 baseColor = vec4(linear(albedoMetal.rgb), 1.0) * vColor * uColorFactor;
+
+ metallic = albedoMetal.METALLIC * metallic;
+
+ vec4 normalRoughness = texture(sNormalRoughness, vUV);
+ perceptualRoughness = normalRoughness.ROUGHNESS * perceptualRoughness;
+
+ n = normalRoughness.rgb;
+ n = normalize(vTBN * ((2.0 * n - 1.0) * vec3(uNormalScale, uNormalScale, 1.0)));
+#endif // THREE_TEX
+
+ // The value of uOpaque and uMask can be 0.0 or 1.0.
+ // If uOpaque is 1.0, alpha value of final color is 1.0;
+ // If uOpaque is 0.0 and uMask is 1.0, alpha value of final color is 0.0 when input alpha is lower than uAlphaThreshold or
+ // 1.0 when input alpha is larger than uAlphaThreshold.
+ // https://www.khronos.org/registry/glTF/specs/2.0/glTF-2.0.html#_material_alphamode
+ baseColor.a = mix(baseColor.a, 1.0, uOpaque);
+ baseColor.a = min(mix(baseColor.a, floor(baseColor.a - uAlphaThreshold + 1.0), uMask), 1.0);
+
+ metallic = clamp(metallic, 0.0, 1.0);
+ // Roughness is authored as perceptual roughness; as is convention,
+ // convert to material roughness by squaring the perceptual roughness [2].
+ perceptualRoughness = clamp(perceptualRoughness, c_MinRoughness, 1.0);
+ lowp float alphaRoughness = perceptualRoughness * perceptualRoughness;
+
+ lowp vec3 f0 = vec3(0.04);
+ lowp vec3 diffuseColor = baseColor.rgb * (vec3(1.0) - f0);
+ diffuseColor *= (1.0 - metallic);
+ lowp vec3 specularColor = mix(f0, baseColor.rgb, metallic);
+
+ // Compute reflectance.
+ lowp float reflectance = max(max(specularColor.r, specularColor.g), specularColor.b);
+
+ // For typical incident reflectance range (between 4% to 100%) set the grazing reflectance to 100% for typical fresnel effect.
+ // For very low reflectance range on highly diffuse objects (below 4%), incrementally reduce grazing reflectance to 0%.
+ lowp float reflectance90 = clamp(reflectance * 25.0, 0.0, 1.0);
+ lowp vec3 specularEnvironmentR0 = specularColor.rgb;
+ lowp vec3 specularEnvironmentR90 = vec3(1.0, 1.0, 1.0) * reflectance90;
+
+ mediump vec3 v = normalize(vPositionToCamera); // Vector from surface point to camera
+ mediump float NdotV = clamp(abs(dot(n, v)), 0.001, 1.0);
+ mediump vec3 reflection = -normalize(reflect(v, n));
+
+ lowp vec3 color = vec3(0.0);
+ lowp vec3 diffuseLight = linear(texture(sDiffuseEnvSampler, n).rgb);
+ lowp vec3 specularLight = linear(texture(sSpecularEnvSampler, reflection).rgb);
+ // retrieve a scale and bias to F0. See [1], Figure 3
+ lowp vec3 brdf = linear(texture(sbrdfLUT, vec2(NdotV, 1.0 - perceptualRoughness)).rgb);
+
+ lowp vec3 diffuse = diffuseLight * diffuseColor;
+ lowp vec3 specular = specularLight * (specularColor * brdf.x + brdf.y);
+ color += (diffuse + specular) * uIblIntensity;
#ifdef OCCLUSION
- float ao = texture(sOcclusion, vUV.st).r;
- finalColor = mix( finalColor, finalColor * ao, uOcclusionStrength );
-#endif
+ lowp float ao = texture(sOcclusion, vUV).r;
+ color = mix(color, color * ao, uOcclusionStrength);
+#endif // OCCLUSION
#ifdef EMISSIVE
- vec3 emissive = texture( sEmissive, vUV.st ).rgb * uEmissiveFactor;
- finalColor += emissive;
-#endif
+ lowp vec3 emissive = linear(texture(sEmissive, vUV).rgb) * uEmissiveFactor;
+ color += emissive;
+#endif // EMISSIVE
-#ifdef THREE_TEX
- FragColor = vec4( finalColor, alpha );
-#else //THREE_TEX
- FragColor = vec4( finalColor, 1.0 );
-#endif //THREE_TEX
+ FragColor = vec4(pow(color, vec3(1.0 / 2.2)), baseColor.a);
}
#version 300 es
+// Original Code
+// https://github.com/KhronosGroup/glTF-Sample-Viewer/blob/glTF-WebGL-PBR/shaders/pbr-vert.glsl
+// Commit dc84b5e374fb3d23153d2248a338ef88173f9eb6
+
#ifdef HIGHP
precision highp float;
#else
in vec3 aPosition;
in vec2 aTexCoord;
in vec3 aNormal;
+
+#ifdef VEC4_TANGENT
+in vec4 aTangent;
+#else
in vec3 aTangent;
+#endif
+
+in vec4 aVertexColor;
#ifdef MORPH
uniform sampler2D sBlendShapeGeometry;
#endif
out vec2 vUV;
-out vec3 vNormal;
-out vec3 vTangent;
-out vec3 vViewVec;
+out lowp mat3 vTBN;
+out lowp vec4 vColor;
+out highp vec3 vPositionToCamera;
-uniform highp mat4 uMvpMatrix;
uniform highp mat4 uViewMatrix;
uniform mat3 uNormalMatrix;
uniform mat4 uModelMatrix;
-uniform mat4 uModelView;
uniform mat4 uProjection;
+uniform lowp float uHasVertexColor;
#ifdef SKINNING
in vec4 aJoints;
{
vec4 position = vec4(aPosition, 1.0);
vec3 normal = aNormal;
- vec3 tangent = aTangent;
+ vec3 tangent = aTangent.xyz;
#ifdef MORPH
int width = textureSize( sBlendShapeGeometry, 0 ).x;
tangent = (bone * vec4(tangent, 0.0)).xyz;
#endif
- vec4 vPosition = uModelMatrix * position;
+ vec4 positionW = uModelMatrix * position;
+ vec4 positionV = uViewMatrix * positionW;
- vNormal = normalize(uNormalMatrix * normal);
+ vPositionToCamera = transpose(mat3(uViewMatrix)) * -vec3(positionV.xyz / positionV.w);
- vTangent = normalize(uNormalMatrix * tangent);
-
-
- vec4 viewPosition = uViewMatrix * vPosition;
- gl_Position = uProjection * viewPosition;
+ lowp vec3 bitangent = cross(normal, tangent);
+#ifdef VEC4_TANGENT
+ bitangent *= aTangent.w;
+#endif
+ vTBN = mat3(uModelMatrix) * mat3(tangent, bitangent, normal);
#ifdef FLIP_V
vUV = vec2(aTexCoord.x, 1.0 - aTexCoord.y);
vUV = aTexCoord;
#endif
- vViewVec = viewPosition.xyz;
+ vColor = mix(vec4(1.0f), aVertexColor, uHasVertexColor);
+
+ gl_Position = uProjection * positionV;
}
/*
- * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
*/
+// EXTERNAL INCLUDES
+#include <dali/devel-api/adaptor-framework/environment-variable.h>
+#include <dali/devel-api/adaptor-framework/image-loading.h>
+
// INTERNAL INCLUDES
#include "dali-scene-loader/public-api/environment-definition.h"
#include "dali-scene-loader/public-api/utils.h"
+namespace
+{
+#define TOKEN_STRING(x) #x
+std::string GetDaliImagePath()
+{
+ return (nullptr == DALI_IMAGE_DIR) ? Dali::EnvironmentVariable::GetEnvironmentVariable(TOKEN_STRING(DALI_IMAGE_DIR)) : DALI_IMAGE_DIR;
+}
+} // unnamed namespace
+
namespace Dali
{
namespace SceneLoader
{
+namespace
+{
+const std::string PRE_COMPUTED_BRDF_TEXTURE_FILE_NAME = "brdfLUT.png";
+}
+
EnvironmentDefinition::RawData
EnvironmentDefinition::LoadRaw(const std::string& environmentsPath) const
{
loadFn(mDiffuseMapPath, raw.mDiffuse);
loadFn(mSpecularMapPath, raw.mSpecular);
+
+ if(mUseBrdfTexture)
+ {
+ Devel::PixelBuffer pixelBuffer = LoadImageFromFile(GetDaliImagePath() + PRE_COMPUTED_BRDF_TEXTURE_FILE_NAME);
+ if(pixelBuffer)
+ {
+ raw.mBrdf = Devel::PixelBuffer::Convert(pixelBuffer);
+ }
+ }
return raw;
}
{
textures.mSpecular = raw.mSpecular.CreateTexture();
}
+
+ if(raw.mBrdf)
+ {
+ textures.mBrdf = Texture::New(TextureType::TEXTURE_2D, raw.mBrdf.GetPixelFormat(), raw.mBrdf.GetWidth(), raw.mBrdf.GetHeight());
+ textures.mBrdf.Upload(raw.mBrdf);
+ }
return textures;
}
{
Texture mDiffuse; // irradiance
Texture mSpecular; // radiance
+ Texture mBrdf; // pre-computed brdf
bool IsLoaded() const
{
struct RawData
{
- CubeData mDiffuse;
- CubeData mSpecular;
+ CubeData mDiffuse;
+ CubeData mSpecular;
+ PixelData mBrdf;
};
using EnvironmentData = std::pair<EnvironmentDefinition, Textures>;
std::string mSpecularMapPath;
Quaternion mCubeOrientation = Quaternion::IDENTITY;
float mIblIntensity = 1.0f;
+ bool mUseBrdfTexture = false;
};
} // namespace SceneLoader
const std::string ORIENTATION_PROPERTY("orientation");
const std::string SCALE_PROPERTY("scale");
const std::string BLEND_SHAPE_WEIGHTS_UNIFORM("uBlendShapeWeight");
-
const std::string MRENDERER_MODEL_IDENTIFICATION("M-Renderer");
-
const std::string ROOT_NODE_NAME("RootNode");
const Vector3 SCALE_TO_ADJUST(100.0f, 100.0f, 100.0f);
+constexpr float DEFAULT_INTENSITY = 0.5f;
+
const Geometry::Type GLTF2_TO_DALI_PRIMITIVES[]{
Geometry::POINTS,
Geometry::LINES,
{gt::Attribute::NORMAL, &MeshDefinition::mNormals, sizeof(Vector3)},
{gt::Attribute::TANGENT, &MeshDefinition::mTangents, sizeof(Vector3)},
{gt::Attribute::TEXCOORD_0, &MeshDefinition::mTexCoords, sizeof(Vector2)},
+ {gt::Attribute::COLOR_0, &MeshDefinition::mColors, sizeof(Vector4)},
{gt::Attribute::JOINTS_0, &MeshDefinition::mJoints0, sizeof(Vector4)},
{gt::Attribute::WEIGHTS_0, &MeshDefinition::mWeights0, sizeof(Vector4)},
};
matDef.SetAlphaCutoff(std::min(1.f, std::max(0.f, m.mAlphaCutoff)));
}
- matDef.mColor = pbr.mBaseColorFactor;
+ matDef.mBaseColorFactor = pbr.mBaseColorFactor;
matDef.mTextureStages.reserve(!!pbr.mBaseColorTexture + !!pbr.mMetallicRoughnessTexture + !!m.mNormalTexture + !!m.mOcclusionTexture + !!m.mEmissiveTexture);
if(pbr.mBaseColorTexture)
// TODO: and there had better be one
matDef.mFlags |= semantic;
}
+ else
+ {
+ matDef.mNeedAlbedoTexture = false;
+ }
matDef.mMetallic = pbr.mMetallicFactor;
matDef.mRoughness = pbr.mRoughnessFactor;
// TODO: and there had better be one
matDef.mFlags |= semantic;
}
+ else
+ {
+ matDef.mNeedMetallicRoughnessTexture = false;
+ }
+ matDef.mNormalScale = m.mNormalTexture.mScale;
if(m.mNormalTexture)
{
const auto semantic = MaterialDefinition::NORMAL;
// TODO: and there had better be one
matDef.mFlags |= semantic;
}
+ else
+ {
+ matDef.mNeedNormalTexture = false;
+ }
// TODO: handle doubleSided
if(m.mOcclusionTexture)
auto& accPositions = *attribs.find(gt::Attribute::POSITION)->second;
meshDef.mPositions = ConvertMeshPrimitiveAccessor(accPositions);
+ // glTF2 support vector4 tangent for mesh.
+ // https://www.khronos.org/registry/glTF/specs/2.0/glTF-2.0.html#meshes-overview
+ meshDef.mTangentType = Property::VECTOR4;
const bool needNormalsTangents = accPositions.mType == gt::AccessorType::VEC3;
for(auto& am : ATTRIBUTE_MAPPINGS)
auto& accessor = meshDef.*(am.mAccessor);
accessor = ConvertMeshPrimitiveAccessor(*iFind->second);
- // Fixing up -- a few of glTF2 sample models have VEC4 tangents; we need VEC3s.
- if(iFind->first == gt::Attribute::TANGENT && (accessor.mBlob.mElementSizeHint > am.mElementSizeRequired))
- {
- accessor.mBlob.mStride = std::max(static_cast<uint16_t>(accessor.mBlob.mStride + accessor.mBlob.mElementSizeHint - am.mElementSizeRequired),
- accessor.mBlob.mElementSizeHint);
- accessor.mBlob.mElementSizeHint = am.mElementSizeRequired;
- }
-
if(iFind->first == gt::Attribute::JOINTS_0)
{
meshDef.mFlags |= (iFind->second->mComponentType == gt::Component::UNSIGNED_SHORT) * MeshDefinition::U16_JOINT_IDS;
js::SetObjectReader(SCENE_READER);
}
+void SetDefaultEnvironmentMap(const gt::Document& doc, ConversionContext& cctx)
+{
+ EnvironmentDefinition envDef;
+ envDef.mUseBrdfTexture = true;
+ envDef.mIblIntensity = DEFAULT_INTENSITY;
+ cctx.mOutput.mResources.mEnvironmentMaps.push_back({std::move(envDef), EnvironmentDefinition::Textures()});
+}
+
} // namespace
void LoadGltfScene(const std::string& url, ShaderDefinitionFactory& shaderFactory, LoadResult& params)
ConvertMeshes(doc, cctx);
ConvertNodes(doc, cctx, isMRendererModel);
ConvertAnimations(doc, cctx);
-
ProcessSkins(doc, cctx);
-
ProduceShaders(shaderFactory, params.mScene);
params.mScene.EnsureUniqueSkinningShaderInstances(params.mResources);
+
+ // Set Default Environment map
+ SetDefaultEnvironmentMap(doc, cctx);
}
} // namespace SceneLoader
raw.mTextures.push_back({SyncImageLoader::Load(imagesPath + iTexture->mTexture.mImageUri), iTexture->mTexture.mSamplerFlags});
++iTexture;
}
- else // single value albedo, albedo-alpha or albedo-metallic
+ else if(mNeedAlbedoTexture) // single value albedo, albedo-alpha or albedo-metallic
{
uint32_t bufferSize = 4;
uint8_t* buffer = nullptr;
raw.mTextures.push_back({SyncImageLoader::Load(imagesPath + iTexture->mTexture.mImageUri), iTexture->mTexture.mSamplerFlags});
++iTexture;
}
- else if(createMetallicRoughnessAndNormal)
+ else if(createMetallicRoughnessAndNormal && mNeedMetallicRoughnessTexture)
{
// NOTE: we want to set both metallic and roughness to 1.0; dli uses the R & A channels,
// glTF2 uses B & G, so we might as well just set all components to 1.0.
raw.mTextures.push_back({SyncImageLoader::Load(imagesPath + iTexture->mTexture.mImageUri), iTexture->mTexture.mSamplerFlags});
++iTexture;
}
- else if(createMetallicRoughnessAndNormal)
+ else if(mNeedNormalTexture)
{
- const auto bufferSize = 3;
- uint8_t* buffer = new uint8_t[bufferSize]{0x7f, 0x7f, 0xff}; // normal of (0, 0, 1)
- raw.mTextures.push_back({PixelData::New(buffer, bufferSize, 1, 1, Pixel::RGB888, PixelData::DELETE_ARRAY), SINGLE_VALUE_SAMPLER});
- }
- else // single-value normal-roughness
- {
- const auto bufferSize = 4;
- uint8_t* buffer = new uint8_t[bufferSize]{0x7f, 0x7f, 0xff, 0xff}; // normal of (0, 0, 1), roughness of 1.0
- raw.mTextures.push_back({PixelData::New(buffer, bufferSize, 1, 1, Pixel::RGBA8888, PixelData::DELETE_ARRAY), SINGLE_VALUE_SAMPLER});
+ if(createMetallicRoughnessAndNormal)
+ {
+ const auto bufferSize = 3;
+ uint8_t* buffer = new uint8_t[bufferSize]{0x7f, 0x7f, 0xff}; // normal of (0, 0, 1)
+ raw.mTextures.push_back({PixelData::New(buffer, bufferSize, 1, 1, Pixel::RGB888, PixelData::DELETE_ARRAY), SINGLE_VALUE_SAMPLER});
+ }
+ else // single-value normal-roughness
+ {
+ const auto bufferSize = 4;
+ uint8_t* buffer = new uint8_t[bufferSize]{0x7f, 0x7f, 0xff, 0xff}; // normal of (0, 0, 1), roughness of 1.0
+ raw.mTextures.push_back({PixelData::New(buffer, bufferSize, 1, 1, Pixel::RGBA8888, PixelData::DELETE_ARRAY), SINGLE_VALUE_SAMPLER});
+ }
}
}
textureSet.SetTexture(n, envTextures.mSpecular);
textureSet.SetSampler(n, specularSampler);
+ ++n;
+ }
+
+ // If pre-computed brdf texture is defined, set the texture.
+ if(envTextures.mBrdf)
+ {
+ textureSet.SetTexture(n, envTextures.mBrdf);
}
}
else
public: // DATA
uint32_t mFlags = 0x0;
- Index mEnvironmentIdx = 0;
- Vector4 mColor = Color::WHITE;
- Vector3 mEmissiveFactor = Vector3::ZERO;
- float mMetallic = 1.f;
- float mRoughness = 1.f;
- float mOcclusionStrength = 1.f;
+ Index mEnvironmentIdx = 0;
+ Vector4 mColor = Color::WHITE;
+ float mMetallic = 1.f;
+ float mRoughness = 1.f;
+ Vector4 mBaseColorFactor = Vector4::ONE;
+ float mNormalScale = 1.f;
+ float mOcclusionStrength = 1.f;
+ Vector3 mEmissiveFactor = Vector3::ZERO;
+
+ // For glTF, the albedo, metallicRoughness and normal textures are each optional.
+ bool mNeedAlbedoTexture = true;
+ bool mNeedMetallicRoughnessTexture = true;
+ bool mNeedNormalTexture = true;
+
std::vector<TextureStage> mTextureStages;
};
if(mTangents.IsDefined())
{
- DALI_ASSERT_ALWAYS(((mTangents.mBlob.mLength % sizeof(Vector3) == 0) ||
- mTangents.mBlob.mStride >= sizeof(Vector3)) &&
+ uint32_t propertySize = (mTangentType == Property::VECTOR4) ? sizeof(Vector4) : sizeof(Vector3);
+ DALI_ASSERT_ALWAYS(((mTangents.mBlob.mLength % propertySize == 0) ||
+ mTangents.mBlob.mStride >= propertySize) &&
"Tangents buffer length not a multiple of element size");
const auto bufferSize = mTangents.mBlob.GetBufferSize();
std::vector<uint8_t> buffer(bufferSize);
{
ExceptionFlinger(ASSERT_LOCATION) << "Failed to read tangents from '" << meshPath << "'.";
}
+ mTangents.mBlob.ApplyMinMax(bufferSize / propertySize, reinterpret_cast<float*>(buffer.data()));
- mTangents.mBlob.ApplyMinMax(bufferSize / sizeof(Vector3), reinterpret_cast<float*>(buffer.data()));
-
- raw.mAttribs.push_back({"aTangent", Property::VECTOR3, static_cast<uint32_t>(bufferSize / sizeof(Vector3)), std::move(buffer)});
+ raw.mAttribs.push_back({"aTangent", mTangentType, static_cast<uint32_t>(bufferSize / propertySize), std::move(buffer)});
}
else if(mTangents.mBlob.mLength != 0 && hasNormals && isTriangles)
{
hasUvs ? GenerateTangentsWithUvs(raw) : GenerateTangents(raw);
}
+ if(mColors.IsDefined())
+ {
+ uint32_t propertySize = mColors.mBlob.mElementSizeHint;
+ Property::Type propertyType = (propertySize == sizeof(Vector4)) ? Property::VECTOR4 : ((propertySize == sizeof(Vector3)) ? Property::VECTOR3 : Property::NONE);
+ if(propertyType != Property::NONE)
+ {
+ DALI_ASSERT_ALWAYS(((mColors.mBlob.mLength % propertySize == 0) ||
+ mColors.mBlob.mStride >= propertySize) &&
+ "Colors buffer length not a multiple of element size");
+ const auto bufferSize = mColors.mBlob.GetBufferSize();
+ std::vector<uint8_t> buffer(bufferSize);
+ if(!ReadAccessor(mColors, binFile, buffer.data()))
+ {
+ ExceptionFlinger(ASSERT_LOCATION) << "Failed to read colors from '" << meshPath << "'.";
+ }
+ mColors.mBlob.ApplyMinMax(bufferSize / propertySize, reinterpret_cast<float*>(buffer.data()));
+
+ raw.mAttribs.push_back({"aVertexColor", propertyType, static_cast<uint32_t>(bufferSize / propertySize), std::move(buffer)});
+ }
+ }
+
if(IsSkinned())
{
if(MaskMatch(mFlags, U16_JOINT_IDS))
Accessor mPositions;
Accessor mNormals; // data can be generated based on positions
Accessor mTexCoords;
+ Accessor mColors;
Accessor mTangents; // data can be generated based on normals and texCoords (the latter isn't mandatory; the results will be better if available)
Accessor mJoints0;
Accessor mWeights0;
+ Property::Type mTangentType{Property::VECTOR3};
Blob mBlendShapeHeader;
std::vector<BlendShape> mBlendShapes;
actor.SetProperty(Actor::Property::COLOR, mColor);
+ actor.RegisterProperty("uHasVertexColor", static_cast<float>(mesh.first.mColors.IsDefined()));
+
auto& matDef = resources.mMaterials[mMaterialIdx].first;
+ actor.RegisterProperty("uColorFactor", matDef.mBaseColorFactor);
actor.RegisterProperty("uMetallicFactor", matDef.mMetallic);
actor.RegisterProperty("uRoughnessFactor", matDef.mRoughness);
+ actor.RegisterProperty("uNormalScale", matDef.mNormalScale);
if(matDef.mFlags & MaterialDefinition::OCCLUSION)
{
actor.RegisterProperty("uOcclusionStrength", matDef.mOcclusionStrength);
Index envIdx = matDef.mEnvironmentIdx;
actor.RegisterProperty("uIblIntensity", resources.mEnvironmentMaps[envIdx].first.mIblIntensity);
- const auto alphaCutoff = matDef.GetAlphaCutoff();
- if(alphaCutoff > 0.f)
+ float opaque = 0.0f;
+ float mask = 0.0f;
+ float alphaCutoff = matDef.GetAlphaCutoff();
+ if(!MaskMatch(matDef.mFlags, MaterialDefinition::TRANSPARENCY))
+ {
+ opaque = 1.0f;
+ }
+ else
{
- actor.RegisterProperty("uAlphaThreshold", alphaCutoff);
+ if(alphaCutoff > 0.f)
+ {
+ mask = 1.0f;
+ }
}
+ actor.RegisterProperty("uOpaque", opaque);
+ actor.RegisterProperty("uMask", mask);
+ actor.RegisterProperty("uAlphaThreshold", alphaCutoff);
}
void ArcNode::OnCreate(const NodeDefinition& node, NodeDefinition::CreateParams& params, Actor& actor) const
}
if(hasTransparency ||
- materialDef.CheckTextures(MaterialDefinition::ALBEDO) ||
- materialDef.CheckTextures(MaterialDefinition::METALLIC | MaterialDefinition::ROUGHNESS) ||
- materialDef.CheckTextures(MaterialDefinition::NORMAL))
+ !materialDef.CheckTextures(MaterialDefinition::ALBEDO | MaterialDefinition::METALLIC) ||
+ !materialDef.CheckTextures(MaterialDefinition::NORMAL | MaterialDefinition::ROUGHNESS))
+
{
shaderDef.mDefines.push_back("THREE_TEX");
+
+ // For glTF, the basecolor, metallic_roughness and normal textures are each optional.
+ if(MaskMatch(materialDef.mFlags, MaterialDefinition::ALBEDO))
+ {
+ shaderDef.mDefines.push_back("BASECOLOR_TEX");
+ }
+
+ if(materialDef.CheckTextures(MaterialDefinition::METALLIC | MaterialDefinition::ROUGHNESS))
+ {
+ shaderDef.mDefines.push_back("METALLIC_ROUGHNESS_TEX");
+ }
+
+ if(MaskMatch(materialDef.mFlags, MaterialDefinition::NORMAL))
+ {
+ shaderDef.mDefines.push_back("NORMAL_TEX");
+ }
}
if(materialDef.GetAlphaCutoff() > 0.f)
}
}
+ if(meshDef.mTangentType == Property::VECTOR4)
+ {
+ shaderDef.mDefines.push_back("VEC4_TANGENT");
+ }
+
shaderDef.mUniforms["uMaxLOD"] = 6.f;
shaderDef.mUniforms["uCubeMatrix"] = Matrix::IDENTITY;
actor.GetProperty<bool>(Actor::Property::CONNECTED_TO_SCENE) &&
actor.GetProperty<bool>(Actor::Property::VISIBLE))
{
- // If the parent's KEYBOARD_FOCUSABLE_CHILDREN is false or VISIBLE is false, it cannot have focus.
+ // If the parent's KEYBOARD_FOCUSABLE_CHILDREN is false, it cannot have focus.
Actor parent = actor.GetParent();
while(parent)
{
- if(!parent.GetProperty<bool>(DevelActor::Property::KEYBOARD_FOCUSABLE_CHILDREN) || !parent.GetProperty<bool>(Actor::Property::VISIBLE))
+ if(!parent.GetProperty<bool>(DevelActor::Property::KEYBOARD_FOCUSABLE_CHILDREN))
{
- DALI_LOG_INFO(gLogFilter, Debug::General, "[%s:%d] Parent Actor has KEYBOARD_FOCUSABLE_CHILDREN false or VISIBLE false,\n", __FUNCTION__, __LINE__);
+ DALI_LOG_INFO(gLogFilter, Debug::General, "[%s:%d] Parent Actor has KEYBOARD_FOCUSABLE_CHILDREN false\n", __FUNCTION__, __LINE__);
return false;
}
parent = parent.GetParent();
bool succeed = false;
// Go through the actor's hierarchy until we find a layout control that knows how to move the focus
- Toolkit::Control parentLayoutControl = GetParentLayoutControl(currentFocusActor);
- while(parentLayoutControl && !succeed)
+ Toolkit::Control layoutControl = IsLayoutControl(currentFocusActor) ? Toolkit::Control::DownCast(currentFocusActor) : GetParentLayoutControl(currentFocusActor);
+ while(layoutControl && !succeed)
{
- succeed = DoMoveFocusWithinLayoutControl(parentLayoutControl, currentFocusActor, direction);
- parentLayoutControl = GetParentLayoutControl(parentLayoutControl);
+ succeed = DoMoveFocusWithinLayoutControl(layoutControl, currentFocusActor, direction);
+ layoutControl = GetParentLayoutControl(layoutControl);
}
if(!succeed)
Toolkit::Control layoutControl = Toolkit::Control::DownCast(nextFocusableActor);
succeed = DoMoveFocusWithinLayoutControl(layoutControl, currentFocusActor, direction);
}
- else
+ if(!succeed)
{
- // Otherwise, just set focus to the next focusable actor
+ // Just set focus to the next focusable actor
succeed = SetCurrentFocusActor(nextFocusableActor);
}
}
if(committedFocusActor && committedFocusActor.GetProperty<bool>(Actor::Property::KEYBOARD_FOCUSABLE) && committedFocusActor.GetProperty<bool>(DevelActor::Property::USER_INTERACTION_ENABLED))
{
// Whether the commited focusable actor is a layout control
- if(IsLayoutControl(committedFocusActor))
+ if(IsLayoutControl(committedFocusActor) && committedFocusActor != control)
{
// If so, move the focus inside it.
Toolkit::Control layoutControl = Toolkit::Control::DownCast(committedFocusActor);
/*
- * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
{
namespace Internal
{
-typedef unsigned char PixelBuffer;
-
Texture ImageAtlas::PackToAtlas(const std::vector<PixelData>& pixelData, Dali::Vector<Vector4>& textureRects)
{
// Record each block size
// Apply the half pixel correction to avoid the color bleeding between neighbour blocks
textureRects[index].x = (static_cast<float>(packPositionX) + 0.5f) / atlasWidth; // left
- textureRects[index].y = (static_cast<float>(packPositionY) + 0.5f) / atlasHeight; // right
+ textureRects[index].y = (static_cast<float>(packPositionY) + 0.5f) / atlasHeight; // top
textureRects[index].z = (static_cast<float>(packPositionX + pixelData[index].GetWidth()) - 0.5f) / atlasWidth; // right
textureRects[index].w = (static_cast<float>(packPositionY + pixelData[index].GetHeight()) - 0.5f) / atlasHeight; // bottom
}
}
}
- unsigned int packPositionX = 0;
- unsigned int packPositionY = 0;
+ uint32_t packPositionX = 0;
+ uint32_t packPositionY = 0;
if(mPacker.Pack(dimensions.GetWidth(), dimensions.GetHeight(), packPositionX, packPositionY))
{
- unsigned short loadId = GetImplementation(mAsyncLoader).Load(url, size, fittingMode, SamplingMode::BOX_THEN_LINEAR, orientationCorrection, DevelAsyncImageLoader::PreMultiplyOnLoad::OFF);
+ uint32_t loadId = GetImplementation(mAsyncLoader).Load(url, size, fittingMode, SamplingMode::BOX_THEN_LINEAR, orientationCorrection, DevelAsyncImageLoader::PreMultiplyOnLoad::OFF);
mLoadingTaskInfoContainer.PushBack(new LoadingTaskInfo(loadId, packPositionX, packPositionY, dimensions.GetWidth(), dimensions.GetHeight(), atlasUploadObserver));
// apply the half pixel correction
textureRect.x = (static_cast<float>(packPositionX) + 0.5f) / mWidth; // left
- textureRect.y = (static_cast<float>(packPositionY) + 0.5f) / mHeight; // right
+ textureRect.y = (static_cast<float>(packPositionY) + 0.5f) / mHeight; // top
textureRect.z = (static_cast<float>(packPositionX + dimensions.GetX()) - 0.5f) / mWidth; // right
textureRect.w = (static_cast<float>(packPositionY + dimensions.GetY()) - 0.5f) / mHeight; // bottom
return false;
}
+bool ImageAtlas::Upload(Vector4& textureRect,
+ const EncodedImageBuffer& encodedImageBuffer,
+ ImageDimensions size,
+ FittingMode::Type fittingMode,
+ bool orientationCorrection,
+ AtlasUploadObserver* atlasUploadObserver)
+{
+ ImageDimensions zero;
+ if(size == zero) // image size not provided
+ {
+ DALI_LOG_ERROR("Desired size is zero! We need to setup desired size for Atlas.\n");
+ // EncodedImageBuffer does not support querying the closest image size.
+ // Just draw broken image.
+ if(!mBrokenImageUrl.empty())
+ {
+ return Upload(textureRect, mBrokenImageUrl, mBrokenImageSize, FittingMode::DEFAULT, true, atlasUploadObserver);
+ }
+ else
+ {
+ textureRect = Vector4::ZERO;
+ return true;
+ }
+ }
+
+ uint32_t packPositionX = 0;
+ uint32_t packPositionY = 0;
+ if(mPacker.Pack(size.GetWidth(), size.GetHeight(), packPositionX, packPositionY))
+ {
+ uint32_t loadId = GetImplementation(mAsyncLoader).LoadEncodedImageBuffer(encodedImageBuffer, size, fittingMode, SamplingMode::BOX_THEN_LINEAR, orientationCorrection, DevelAsyncImageLoader::PreMultiplyOnLoad::OFF);
+ mLoadingTaskInfoContainer.PushBack(new LoadingTaskInfo(loadId, packPositionX, packPositionY, size.GetWidth(), size.GetHeight(), atlasUploadObserver));
+ // apply the half pixel correction
+ textureRect.x = (static_cast<float>(packPositionX) + 0.5f) / mWidth; // left
+ textureRect.y = (static_cast<float>(packPositionY) + 0.5f) / mHeight; // top
+ textureRect.z = (static_cast<float>(packPositionX + size.GetX()) - 0.5f) / mWidth; // right
+ textureRect.w = (static_cast<float>(packPositionY + size.GetY()) - 0.5f) / mHeight; // bottom
+
+ if(atlasUploadObserver)
+ {
+ // Register to the observer.
+ // Note that a matching unregister call should be invoked in UploadToAtlas if the observer is still alive by then.
+ atlasUploadObserver->Register(*this);
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
bool ImageAtlas::Upload(Vector4& textureRect, PixelData pixelData)
{
- unsigned int packPositionX = 0;
- unsigned int packPositionY = 0;
+ uint32_t packPositionX = 0;
+ uint32_t packPositionY = 0;
if(mPacker.Pack(pixelData.GetWidth(), pixelData.GetHeight(), packPositionX, packPositionY))
{
mAtlas.Upload(pixelData, 0u, 0u, packPositionX, packPositionY, pixelData.GetWidth(), pixelData.GetHeight());
// apply the half pixel correction
textureRect.x = (static_cast<float>(packPositionX) + 0.5f) / mWidth; // left
- textureRect.y = (static_cast<float>(packPositionY) + 0.5f) / mHeight; // right
+ textureRect.y = (static_cast<float>(packPositionY) + 0.5f) / mHeight; // top
textureRect.z = (static_cast<float>(packPositionX + pixelData.GetWidth()) - 0.5f) / mWidth; // right
textureRect.w = (static_cast<float>(packPositionY + pixelData.GetHeight()) - 0.5f) / mHeight; // bottom
{
if(mLoadingTaskInfoContainer[0]->loadTaskId == id)
{
- Rect<unsigned int> packRect(mLoadingTaskInfoContainer[0]->packRect);
+ Rect<uint32_t> packRect(mLoadingTaskInfoContainer[0]->packRect);
if(!pixelData || (pixelData.GetWidth() == 0 && pixelData.GetHeight() == 0))
{
if(!mBrokenImageUrl.empty()) // replace with the broken image
}
}
-void ImageAtlas::UploadBrokenImage(const Rect<unsigned int>& area)
+void ImageAtlas::UploadBrokenImage(const Rect<uint32_t>& area)
{
Devel::PixelBuffer brokenBuffer = LoadImageFromFile(mBrokenImageUrl, ImageDimensions(area.width, area.height));
SizeType loadedWidth = brokenBuffer.GetWidth();
#define DALI_TOOLKIT_IMAGE_ATLAS_IMPL_H
/*
- * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
// EXTERNAL INCLUDES
#include <dali/devel-api/common/owner-container.h>
+#include <dali/public-api/adaptor-framework/encoded-image-buffer.h>
#include <dali/public-api/common/intrusive-ptr.h>
#include <dali/public-api/object/base-object.h>
#include <dali/public-api/signals/connection-tracker.h>
// INTERNAL INCLUDES
#include <dali-toolkit/devel-api/image-loader/image-atlas.h>
#include <dali-toolkit/internal/image-loader/atlas-packer.h>
-#include <dali-toolkit/public-api/image-loader/async-image-loader.h>
#include <dali-toolkit/internal/visuals/visual-url.h>
+#include <dali-toolkit/public-api/image-loader/async-image-loader.h>
namespace Dali
{
AtlasUploadObserver* atlasUploadObserver);
/**
+ * @brief Upload a resource image to the atlas by encoded buffer.
+ *
+ * @note To make the atlasing efficient, a valid size should be provided.
+ * If no size is provided, the image cannot be atlased; the broken image (if set) is uploaded instead.
+ * Do not set a size that is bigger than the actual image size, as up-scaling is not available;
+ * the content of the area not covered by the actual image is undefined — it will not be cleared.
+ *
+ * SamplingMode::BOX_THEN_LINEAR is used to sampling pixels from the input image while fitting it to desired size.
+ *
+ * @param [out] textureRect The texture area of the resource image in the atlas.
+ * @param [in] encodedImageBuffer The encoded raw buffer of the resource image file to use.
+ * @param [in] size The width and height to fit the loaded image to.
+ * @param [in] fittingMode The method used to fit the shape of the image before loading to the shape defined by the size parameter.
+ * @param [in] orientationCorrection Reorient the image to respect any orientation metadata in its header.
+ * @param [in] atlasUploadObserver The observer to observe the upload state inside the ImageAtlas.
+ * @return True if there is enough space to fit this image in, false otherwise.
+ * @note The valid callback function here is required to have the signature of void( void ).
+ */
+ bool Upload(Vector4& textureRect,
+ const EncodedImageBuffer& encodedImageBuffer,
+ ImageDimensions size,
+ FittingMode::Type fittingMode,
+ bool orientationCorrection,
+ AtlasUploadObserver* atlasUploadObserver);
+
+ /**
* @copydoc Toolkit::ImageAtlas::Upload( Vector4&, PixelData )
*/
bool Upload(Vector4& textureRect, PixelData pixelData);
*
* @param[in] area The pixel area for uploading.
*/
- void UploadBrokenImage(const Rect<unsigned int>& area);
+ void UploadBrokenImage(const Rect<uint32_t>& area);
// Undefined
ImageAtlas(const ImageAtlas& imageAtlas);
*/
struct LoadingTaskInfo
{
- LoadingTaskInfo(unsigned short loadTaskId,
- unsigned int packPositionX,
- unsigned int packPositionY,
- unsigned int width,
- unsigned int height,
+ LoadingTaskInfo(uint32_t loadTaskId,
+ uint32_t packPositionX,
+ uint32_t packPositionY,
+ uint32_t width,
+ uint32_t height,
AtlasUploadObserver* observer)
: loadTaskId(loadTaskId),
packRect(packPositionX, packPositionY, width, height),
{
}
- unsigned short loadTaskId;
- Rect<unsigned int> packRect;
+ uint32_t loadTaskId;
+ Rect<uint32_t> packRect;
AtlasUploadObserver* observer;
};
const Dali::SamplingMode::Type& samplingMode,
const TextureCacheManager::UseAtlas& useAtlas,
const TextureCacheManager::TextureId& maskTextureId,
- const bool& cropToMask)
+ const bool& cropToMask,
+ const std::uint32_t& frameIndex)
{
std::vector<std::uint8_t> hashTarget(url.GetUrl().begin(), url.GetUrl().end());
const size_t urlLength = hashTarget.size();
*hashTargetPtr++ = (cropToMask ? 'C' : 'M');
}
+ // Append the frameIndex. We skip this extra work when frameIndex == 0u (the non-animated image case).
+ if(frameIndex > 0u)
+ {
+ auto textureIdIndex = hashTarget.size();
+ hashTarget.resize(hashTarget.size() + sizeof(std::uint32_t));
+ std::uint8_t* hashTargetPtr = reinterpret_cast<std::uint8_t*>(&(hashTarget[textureIdIndex]));
+
+ // Append the frame index to the end of the URL byte by byte.
+ // NOTE(review): the loop below runs sizeof(std::uint8_t) == 1 time, but the buffer was
+ // resized by sizeof(std::uint32_t) above — it likely should iterate sizeof(std::uint32_t)
+ // times, otherwise 3 of the 4 reserved bytes are left uninitialized. Please confirm.
+ std::uint32_t saltedFrameIndex = frameIndex;
+ for(size_t byteIter = 0; byteIter < sizeof(std::uint8_t); ++byteIter)
+ {
+ *hashTargetPtr++ = saltedFrameIndex & 0xff;
+ saltedFrameIndex >>= 8u;
+ }
+ }
+
return Dali::CalculateHash(hashTarget);
}
const TextureCacheManager::TextureId& maskTextureId,
const bool& cropToMask,
const TextureCacheManager::MultiplyOnLoad& preMultiplyOnLoad,
- const bool& isAnimatedImage)
+ const bool& isAnimatedImage,
+ const std::uint32_t& frameIndex)
{
// Iterate through our hashes to find a match.
const auto& hashIterator = mTextureHashContainer.find(hash);
(cropToMask == textureInfo.cropToMask) &&
(size == textureInfo.desiredSize) &&
(isAnimatedImage == textureInfo.isAnimatedImageFormat) &&
+ (frameIndex == textureInfo.frameIndex) &&
((size.GetWidth() == 0 && size.GetHeight() == 0) ||
(fittingMode == textureInfo.fittingMode &&
samplingMode == textureInfo.samplingMode)))
* @param[in] useAtlas True if atlased
* @param[in] maskTextureId The masking texture id (or INVALID_TEXTURE_ID)
* @param[in] cropToMask True if crop to mask
+ * @param[in] frameIndex The frame index to use
* @return A hash of the provided data for caching.
*/
TextureCacheManager::TextureHash GenerateHash(
const Dali::SamplingMode::Type& samplingMode,
const TextureCacheManager::UseAtlas& useAtlas,
const TextureCacheManager::TextureId& maskTextureId,
- const bool& cropToMask);
+ const bool& cropToMask,
+ const std::uint32_t& frameIndex);
/**
* @brief Looks up a cached texture by its hash.
* @param[in] samplingMode The SamplingMode to use
* @param[in] useAtlas True if atlased
* @param[in] maskTextureId Optional texture ID to use to mask this image
+ * @param[in] cropToMask True if crop to mask
* @param[in] preMultiplyOnLoad if the image's color should be multiplied by it's alpha. Set to OFF if there is no alpha.
* @param[in] isAnimatedImage True if the texture is from animated image.
- * @param[in] cropToMask True if crop to mask
+ * @param[in] frameIndex The frame index to use
* @return A TextureCacheIndex of a cached Texture if found. Or INVALID_CACHE_INDEX if not found.
*/
TextureCacheManager::TextureCacheIndex FindCachedTexture(
const TextureCacheManager::TextureId& maskTextureId,
const bool& cropToMask,
const TextureCacheManager::MultiplyOnLoad& preMultiplyOnLoad,
- const bool& isAnimatedImage);
+ const bool& isAnimatedImage,
+ const std::uint32_t& frameIndex);
/**
* @brief Append a Texture to the TextureCacheManager.
const Dali::WrapMode::Type& wrapModeU,
const Dali::WrapMode::Type& wrapModeV,
const bool& synchronousLoading,
- const bool& useCache,
TextureUploadObserver* textureObserver)
{
TextureSet textureSet;
}
auto preMultiply = TextureManager::MultiplyOnLoad::LOAD_WITHOUT_MULTIPLY;
- textureId = RequestLoadInternal(animatedImageLoading.GetUrl(), alphaMaskId, contentScaleFactor, ImageDimensions(), FittingMode::SCALE_TO_FILL, SamplingMode::BOX_THEN_LINEAR, UseAtlas::NO_ATLAS, cropToMask, StorageType::UPLOAD_TO_TEXTURE, textureObserver, true, TextureManager::ReloadPolicy::CACHED, preMultiply, animatedImageLoading, frameIndex, false, useCache);
+ textureId = RequestLoadInternal(animatedImageLoading.GetUrl(), alphaMaskId, contentScaleFactor, ImageDimensions(), FittingMode::SCALE_TO_FILL, SamplingMode::BOX_THEN_LINEAR, UseAtlas::NO_ATLAS, cropToMask, StorageType::UPLOAD_TO_TEXTURE, textureObserver, true, TextureManager::ReloadPolicy::CACHED, preMultiply, animatedImageLoading, frameIndex, false);
TextureManager::LoadState loadState = mTextureCacheManager.GetTextureStateInternal(textureId);
if(loadState == TextureManager::LoadState::UPLOADED)
}
else
{
- RequestLoadInternal(url, INVALID_TEXTURE_ID, 1.0f, desiredSize, fittingMode, samplingMode, UseAtlas::NO_ATLAS, false, StorageType::RETURN_PIXEL_BUFFER, textureObserver, orientationCorrection, TextureManager::ReloadPolicy::FORCED, preMultiplyOnLoad, Dali::AnimatedImageLoading(), 0u, false, false);
+ RequestLoadInternal(url, INVALID_TEXTURE_ID, 1.0f, desiredSize, fittingMode, samplingMode, UseAtlas::NO_ATLAS, false, StorageType::RETURN_PIXEL_BUFFER, textureObserver, orientationCorrection, TextureManager::ReloadPolicy::FORCED, preMultiplyOnLoad, Dali::AnimatedImageLoading(), 0u, false);
}
return pixelBuffer;
else
{
// For Atlas
- if(synchronousLoading && atlasingStatus && imageAtlasManager->CheckAtlasAvailable(url, desiredSize))
+ if(synchronousLoading && atlasingStatus)
{
- Devel::PixelBuffer pixelBuffer = LoadImageSynchronously(url, desiredSize, fittingMode, samplingMode, orientationCorrection);
-
- if(maskInfo && maskInfo->mAlphaMaskUrl.IsValid())
+ const bool synchronousAtlasAvaliable = (desiredSize != ImageDimensions() || url.IsLocalResource()) ? imageAtlasManager->CheckAtlasAvailable(url, desiredSize)
+ : false;
+ if(synchronousAtlasAvaliable)
{
- Devel::PixelBuffer maskPixelBuffer = LoadImageSynchronously(maskInfo->mAlphaMaskUrl.GetUrl(), ImageDimensions(), FittingMode::SCALE_TO_FILL, SamplingMode::NO_FILTER, true);
- if(maskPixelBuffer)
+ Devel::PixelBuffer pixelBuffer = LoadImageSynchronously(url, desiredSize, fittingMode, samplingMode, orientationCorrection);
+
+ if(pixelBuffer && maskInfo && maskInfo->mAlphaMaskUrl.IsValid())
{
- pixelBuffer.ApplyMask(maskPixelBuffer, maskInfo->mContentScaleFactor, maskInfo->mCropToMask);
+ Devel::PixelBuffer maskPixelBuffer = LoadImageSynchronously(maskInfo->mAlphaMaskUrl, ImageDimensions(), FittingMode::SCALE_TO_FILL, SamplingMode::NO_FILTER, true);
+ if(maskPixelBuffer)
+ {
+ pixelBuffer.ApplyMask(maskPixelBuffer, maskInfo->mContentScaleFactor, maskInfo->mCropToMask);
+ }
}
- }
- PixelData data;
- if(pixelBuffer)
- {
- PreMultiply(pixelBuffer, preMultiplyOnLoad);
- data = Devel::PixelBuffer::Convert(pixelBuffer); // takes ownership of buffer
-
- if(data)
+ PixelData data;
+ if(pixelBuffer)
{
- textureSet = imageAtlasManager->Add(textureRect, data);
- if(textureSet)
+ PreMultiply(pixelBuffer, preMultiplyOnLoad);
+ data = Devel::PixelBuffer::Convert(pixelBuffer); // takes ownership of buffer
+
+ if(data)
{
- textureRectSize.SetWidth(data.GetWidth());
- textureRectSize.SetHeight(data.GetHeight());
+ textureSet = imageAtlasManager->Add(textureRect, data);
+ if(textureSet)
+ {
+ textureRectSize.SetWidth(data.GetWidth());
+ textureRectSize.SetHeight(data.GetHeight());
+ }
+ }
+ else
+ {
+ DALI_LOG_ERROR("TextureManager::LoadTexture: Synchronous Texture loading with atlasing is failed.\n");
}
}
- else
+ if(!textureSet)
{
- DALI_LOG_ERROR("TextureManager::LoadTexture: Synchronous Texture loading with atlasing is failed.\n");
+ atlasingStatus = false;
}
}
- if(!textureSet)
- {
- atlasingStatus = false;
- }
}
if(!textureSet)
Dali::ImageDimensions atlasDesiredSize = desiredSize;
if(atlasingStatus)
{
- textureSet = imageAtlasManager->Add(textureRect, url.GetUrl(), atlasDesiredSize, fittingMode, true, atlasObserver);
+ if(url.IsBufferResource())
+ {
+ const EncodedImageBuffer& encodedImageBuffer = GetEncodedImageBuffer(url.GetUrl());
+ if(encodedImageBuffer)
+ {
+ textureSet = imageAtlasManager->Add(textureRect, encodedImageBuffer, desiredSize, fittingMode, true, atlasObserver);
+ }
+ }
+ else
+ {
+ textureSet = imageAtlasManager->Add(textureRect, url, atlasDesiredSize, fittingMode, true, atlasObserver);
+ }
}
if(!textureSet) // big image, no atlasing or atlasing failed
{
TextureManager::MultiplyOnLoad& preMultiplyOnLoad,
const bool& synchronousLoading)
{
- return RequestLoadInternal(url, INVALID_TEXTURE_ID, 1.0f, desiredSize, fittingMode, samplingMode, useAtlas, false, StorageType::UPLOAD_TO_TEXTURE, observer, orientationCorrection, reloadPolicy, preMultiplyOnLoad, Dali::AnimatedImageLoading(), 0u, synchronousLoading, true);
+ return RequestLoadInternal(url, INVALID_TEXTURE_ID, 1.0f, desiredSize, fittingMode, samplingMode, useAtlas, false, StorageType::UPLOAD_TO_TEXTURE, observer, orientationCorrection, reloadPolicy, preMultiplyOnLoad, Dali::AnimatedImageLoading(), 0u, synchronousLoading);
}
TextureManager::TextureId TextureManager::RequestLoad(
TextureManager::MultiplyOnLoad& preMultiplyOnLoad,
const bool& synchronousLoading)
{
- return RequestLoadInternal(url, maskTextureId, contentScale, desiredSize, fittingMode, samplingMode, useAtlas, cropToMask, StorageType::UPLOAD_TO_TEXTURE, observer, orientationCorrection, reloadPolicy, preMultiplyOnLoad, Dali::AnimatedImageLoading(), 0u, synchronousLoading, true);
+ return RequestLoadInternal(url, maskTextureId, contentScale, desiredSize, fittingMode, samplingMode, useAtlas, cropToMask, StorageType::UPLOAD_TO_TEXTURE, observer, orientationCorrection, reloadPolicy, preMultiplyOnLoad, Dali::AnimatedImageLoading(), 0u, synchronousLoading);
}
TextureManager::TextureId TextureManager::RequestMaskLoad(
{
// Use the normal load procedure to get the alpha mask.
auto preMultiply = TextureManager::MultiplyOnLoad::LOAD_WITHOUT_MULTIPLY;
- return RequestLoadInternal(maskUrl, INVALID_TEXTURE_ID, 1.0f, ImageDimensions(), FittingMode::SCALE_TO_FILL, SamplingMode::NO_FILTER, UseAtlas::NO_ATLAS, false, StorageType::KEEP_PIXEL_BUFFER, NULL, true, TextureManager::ReloadPolicy::CACHED, preMultiply, Dali::AnimatedImageLoading(), 0u, synchronousLoading, true);
+ return RequestLoadInternal(maskUrl, INVALID_TEXTURE_ID, 1.0f, ImageDimensions(), FittingMode::SCALE_TO_FILL, SamplingMode::NO_FILTER, UseAtlas::NO_ATLAS, false, StorageType::KEEP_PIXEL_BUFFER, NULL, true, TextureManager::ReloadPolicy::CACHED, preMultiply, Dali::AnimatedImageLoading(), 0u, synchronousLoading);
}
TextureManager::TextureId TextureManager::RequestLoadInternal(
TextureManager::MultiplyOnLoad& preMultiplyOnLoad,
Dali::AnimatedImageLoading animatedImageLoading,
const std::uint32_t& frameIndex,
- const bool& synchronousLoading,
- const bool& useCache)
+ const bool& synchronousLoading)
{
TextureHash textureHash = INITIAL_HASH_NUMBER;
TextureCacheIndex cacheIndex = INVALID_CACHE_INDEX;
- if(storageType != StorageType::RETURN_PIXEL_BUFFER && useCache)
+ if(storageType != StorageType::RETURN_PIXEL_BUFFER)
{
- textureHash = mTextureCacheManager.GenerateHash(url, desiredSize, fittingMode, samplingMode, useAtlas, maskTextureId, cropToMask);
+ textureHash = mTextureCacheManager.GenerateHash(url, desiredSize, fittingMode, samplingMode, useAtlas, maskTextureId, cropToMask, frameIndex);
// Look up the texture by hash. Note: The extra parameters are used in case of a hash collision.
- cacheIndex = mTextureCacheManager.FindCachedTexture(textureHash, url, desiredSize, fittingMode, samplingMode, useAtlas, maskTextureId, cropToMask, preMultiplyOnLoad, (animatedImageLoading) ? true : false);
+ cacheIndex = mTextureCacheManager.FindCachedTexture(textureHash, url, desiredSize, fittingMode, samplingMode, useAtlas, maskTextureId, cropToMask, preMultiplyOnLoad, (animatedImageLoading) ? true : false, frameIndex);
}
TextureManager::TextureId textureId = INVALID_TEXTURE_ID;
// Update preMultiplyOnLoad value. It should be changed according to preMultiplied value of the cached info.
preMultiplyOnLoad = mTextureCacheManager[cacheIndex].preMultiplied ? TextureManager::MultiplyOnLoad::MULTIPLY_ON_LOAD : TextureManager::MultiplyOnLoad::LOAD_WITHOUT_MULTIPLY;
- DALI_LOG_INFO(gTextureManagerLogFilter, Debug::General, "TextureManager::RequestLoad( url=%s observer=%p ) Using cached texture id@%d, textureId=%d premultiplied=%d\n", url.GetUrl().c_str(), observer, cacheIndex.GetIndex(), textureId, mTextureCacheManager[cacheIndex].preMultiplied ? 1 : 0);
+ DALI_LOG_INFO(gTextureManagerLogFilter, Debug::General, "TextureManager::RequestLoad( url=%s observer=%p ) Using cached texture id@%d, textureId=%d, frameindex=%d, premultiplied=%d\n", url.GetUrl().c_str(), observer, cacheIndex.GetIndex(), textureId, frameIndex, mTextureCacheManager[cacheIndex].preMultiplied ? 1 : 0);
}
if(textureId == INVALID_TEXTURE_ID) // There was no caching, or caching not required
 // Cache new texture, and get cacheIndex.
cacheIndex = mTextureCacheManager.AppendCache(TextureInfo(textureId, maskTextureId, url, desiredSize, contentScale, fittingMode, samplingMode, false, cropToMask, useAtlas, textureHash, orientationCorrection, preMultiply, animatedImageLoading, frameIndex));
- DALI_LOG_INFO(gTextureManagerLogFilter, Debug::General, "TextureManager::RequestLoad( url=%s observer=%p ) New texture, cacheIndex:%d, textureId=%d\n", url.GetUrl().c_str(), observer, cacheIndex.GetIndex(), textureId);
+ DALI_LOG_INFO(gTextureManagerLogFilter, Debug::General, "TextureManager::RequestLoad( url=%s observer=%p ) New texture, cacheIndex:%d, textureId=%d, frameindex=%d\n", url.GetUrl().c_str(), observer, cacheIndex.GetIndex(), textureId, frameIndex);
}
// The below code path is common whether we are using the cache or not.
mQueueLoadFlag = true;
+ // Reverse the observer list so that we can pop_back each observer.
+ std::reverse(info->observerList.Begin(), info->observerList.End());
+
while(info->observerList.Count())
{
- TextureUploadObserver* observer = info->observerList[0];
+ TextureUploadObserver* observer = *(info->observerList.End() - 1u);
// During LoadComplete() a Control ResourceReady() signal is emitted.
// During that signal the app may add remove /add Textures (e.g. via
// Disconnect and remove the observer first.
observer->DestructionSignal().Disconnect(this, &TextureManager::ObserverDestroyed);
- info->observerList.Erase(info->observerList.Begin());
+ info->observerList.Erase(info->observerList.End() - 1u);
EmitLoadComplete(observer, *info, success);
* @param[in] wrapModeU Horizontal Wrap mode
* @param[in] wrapModeV Vertical Wrap mode
* @param[in] synchronousLoading true if the frame should be loaded synchronously
- * @param[in] useCache true if this frame loading uses cache.
* @param[in] textureObserver The client object should inherit from this and provide the "LoadCompleted" virtual.
* This is called when an image load completes (or fails).
*
const Dali::WrapMode::Type& wrapModeU,
const Dali::WrapMode::Type& wrapModeV,
const bool& synchronousLoading,
- const bool& useCache,
TextureUploadObserver* textureObserver);
/**
* @param[in] frameIndex The frame index of a frame to be loaded frame
* @param[in] synchronousLoading True if the frame should be loaded synchronously. If you skip this parameter,
* default is false.
- * @param[in] useCache True if the texture will be cached.
* @return A TextureId to use as a handle to reference this Texture
*/
TextureId RequestLoadInternal(
TextureManager::MultiplyOnLoad& preMultiplyOnLoad,
Dali::AnimatedImageLoading animatedImageLoading,
const std::uint32_t& frameIndex,
- const bool& synchronousLoading,
- const bool& useCache);
+ const bool& synchronousLoading);
/**
* @brief Load a new image synchronously.
/*
- * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
bool synchronouslyLoaded = false;
if(mIsSynchronousLoading && mQueue.IsEmpty())
{
- textureSet = RequestFrameLoading(frameIndex, frameIndex == FIRST_FRAME_INDEX, true);
- batchFrameIndex = (frameIndex + 1) % mFrameCount;
+ textureSet = RequestFrameLoading(frameIndex, true);
+ batchFrameIndex = (frameIndex + 1) % mFrameCount;
uint32_t interval = 0u;
if(textureSet)
{
synchronouslyLoaded = true;
- interval = mAnimatedImageLoading.GetFrameInterval(mQueue.Back().mFrameNumber);
+ interval = mAnimatedImageLoading.GetFrameInterval(mQueue.Back().mFrameNumber);
}
MakeFrameReady(synchronouslyLoaded, textureSet, interval);
}
return (!mQueue.IsEmpty() && mQueue.Front().mReady);
}
-TextureSet RollingAnimatedImageCache::RequestFrameLoading(uint32_t frameIndex, bool useCache, bool synchronousLoading)
+TextureSet RollingAnimatedImageCache::RequestFrameLoading(uint32_t frameIndex, bool synchronousLoading)
{
ImageFrame imageFrame;
imageFrame.mFrameNumber = frameIndex;
Dali::WrapMode::Type::DEFAULT,
Dali::WrapMode::Type::DEFAULT,
synchronousLoading,
- useCache,
this);
mImageUrls[frameIndex].mTextureId = loadTextureId;
{
if(mLoadState != TextureManager::LoadState::LOADING)
{
- RequestFrameLoading(frameIndex, frameIndex == FIRST_FRAME_INDEX, false);
+ RequestFrameLoading(frameIndex, false);
}
else
{
{
uint32_t loadingIndex = mLoadWaitingQueue.front();
mLoadWaitingQueue.erase(mLoadWaitingQueue.begin());
- RequestFrameLoading(loadingIndex, loadingIndex == FIRST_FRAME_INDEX, false);
+ RequestFrameLoading(loadingIndex, false);
}
else if(mQueue.Count() == 1u && textureInformation.frameCount > SINGLE_IMAGE_COUNT)
{
* @brief Request to Load a frame
*
* @param[in] frameIndex index of frame to be loaded.
- * @param[in] useCache true if this frame loading uses cache.
* @param[in] synchronousLoading true if the frame should be loaded synchronously
*
* @return the texture set currently loaded.
*/
- TextureSet RequestFrameLoading(uint32_t frameIndex, bool useCache, bool synchronousLoading);
+ TextureSet RequestFrameLoading(uint32_t frameIndex, bool synchronousLoading);
/**
* @brief Load the next batch of images
void LoadComplete(bool loadSuccess, TextureInformation textureInformation) override;
private:
-
/**
* Secondary class to hold readiness and index into url
*/
/*
- * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
}
size = dimensions;
- unsigned int i = 0;
+ uint32_t i = 0;
for(AtlasContainer::iterator iter = mAtlasList.begin(); iter != mAtlasList.end(); ++iter)
{
if(GetImplementation(*iter).Upload(textureRect, url, size, fittingMode, orientationCorrection, atlasUploadObserver))
return mTextureSetList.back();
}
+TextureSet ImageAtlasManager::Add(Vector4& textureRect,
+ const EncodedImageBuffer& encodedImageBuffer,
+ const ImageDimensions& size,
+ FittingMode::Type fittingMode,
+ bool orientationCorrection,
+ AtlasUploadObserver* atlasUploadObserver)
+{
+ // big image, atlasing is not applied
+ if(static_cast<uint32_t>(size.GetWidth()) * static_cast<uint32_t>(size.GetHeight()) > MAX_ITEM_AREA || size.GetWidth() > DEFAULT_ATLAS_SIZE || size.GetHeight() > DEFAULT_ATLAS_SIZE)
+ {
+ return TextureSet();
+ }
+
+ uint32_t i = 0;
+ for(AtlasContainer::iterator iter = mAtlasList.begin(); iter != mAtlasList.end(); ++iter)
+ {
+ if(GetImplementation(*iter).Upload(textureRect, encodedImageBuffer, size, fittingMode, orientationCorrection, atlasUploadObserver))
+ {
+ return mTextureSetList[i];
+ }
+ i++;
+ }
+
+ CreateNewAtlas();
+ GetImplementation(mAtlasList.back()).Upload(textureRect, encodedImageBuffer, size, fittingMode, orientationCorrection, atlasUploadObserver);
+ return mTextureSetList.back();
+}
+
TextureSet ImageAtlasManager::Add(Vector4& textureRect,
PixelData pixelData)
{
return TextureSet();
}
- unsigned int i = 0;
+ uint32_t i = 0;
for(AtlasContainer::iterator iter = mAtlasList.begin(); iter != mAtlasList.end(); ++iter)
{
if((*iter).Upload(textureRect, pixelData))
void ImageAtlasManager::Remove(TextureSet textureSet, const Vector4& textureRect)
{
- unsigned int i = 0;
+ uint32_t i = 0;
for(TextureSetContainer::iterator iter = mTextureSetList.begin(); iter != mTextureSetList.end(); ++iter)
{
if((*iter) == textureSet)
#define DALI_TOOLKIT_IMAGE_ATLAS_MANAGER_H
/*
- * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*/
// EXTERNAL INCLUDES
+#include <dali/public-api/adaptor-framework/encoded-image-buffer.h>
#include <dali/public-api/common/vector-wrapper.h>
#include <dali/public-api/object/ref-object.h>
#include <dali/public-api/rendering/texture-set.h>
FittingMode::Type fittingMode = FittingMode::DEFAULT,
bool orientationCorrection = true,
AtlasUploadObserver* atlasUploadObserver = NULL);
+
+ /**
+ * @brief Add an image to the atlas.
+ *
+ * @note To make the atlasing efficient, a valid size should be provided.
+ *
+ * SamplingMode::BOX_THEN_LINEAR is used to sampling pixels from the input image while fitting it to desired size.
+ *
+ * @param [out] textureRect The texture area of the resource image in the atlas.
+ * @param [in] encodedImageBuffer The encoded buffer of the resource image file to use.
+ * @param [in] size The width and height to fit the loaded image to.
+ * @param [in] fittingMode The method used to fit the shape of the image before loading to the shape defined by the size parameter.
+ * @param [in] orientationCorrection Reorient the image to respect any orientation metadata in its header.
+ * @param [in] atlasUploadObserver The object to observe the uploading state inside ImageAtlas.
+ * @return The texture set containing the image.
+ */
+ TextureSet Add(Vector4& textureRect,
+ const EncodedImageBuffer& encodedImageBuffer,
+ const ImageDimensions& size,
+ FittingMode::Type fittingMode = FittingMode::DEFAULT,
+ bool orientationCorrection = true,
+ AtlasUploadObserver* atlasUploadObserver = NULL);
+
/**
* @brief Add a pixel buffer to the atlas
*
{
const unsigned int TOOLKIT_MAJOR_VERSION = 2;
const unsigned int TOOLKIT_MINOR_VERSION = 1;
-const unsigned int TOOLKIT_MICRO_VERSION = 21;
+const unsigned int TOOLKIT_MICRO_VERSION = 22;
const char* const TOOLKIT_BUILD_DATE = __DATE__ " " __TIME__;
#ifdef DEBUG_ENABLED
Name: dali2-toolkit
Summary: Dali 3D engine Toolkit
-Version: 2.1.21
+Version: 2.1.22
Release: 1
Group: System/Libraries
License: Apache-2.0 and BSD-3-Clause and MIT