From b63263267d052c1ba0793c7429779df8d03a9e4e Mon Sep 17 00:00:00 2001 From: John Kessenich Date: Fri, 26 Jun 2015 16:42:50 -0600 Subject: [PATCH] Second round line ending clean up, from fresh round trip. --- glslang/CMakeLists.txt | 192 +-- glslang/Include/revision.h | 26 +- glslang/MachineIndependent/gl_types.h | 300 ++-- glslang/MachineIndependent/limits.cpp | 396 +++--- glslang/MachineIndependent/linkValidate.cpp | 1968 +++++++++++++-------------- glslang/MachineIndependent/reflection.h | 244 ++-- glslang/OSDependent/Linux/CMakeLists.txt | 4 +- glslang/OSDependent/Windows/CMakeLists.txt | 28 +- 8 files changed, 1579 insertions(+), 1579 deletions(-) diff --git a/glslang/CMakeLists.txt b/glslang/CMakeLists.txt index 7fa5566..c8fbea6 100644 --- a/glslang/CMakeLists.txt +++ b/glslang/CMakeLists.txt @@ -1,96 +1,96 @@ -cmake_minimum_required(VERSION 2.8) - -include_directories(MachineIndependent ../OGLCompilersDLL ${CMAKE_CURRENT_BINARY_DIR}) -if(WIN32) - add_subdirectory(OSDependent/Windows) - include_directories(${include_directories} OSDependent/Windows) -elseif(UNIX) - add_subdirectory(OSDependent/Linux) - include_directories(${include_directories} OSDependent/Linux) -else(WIN32) - message("unkown platform") -endif(WIN32) - -set(SOURCES - MachineIndependent/glslang.y - MachineIndependent/Constant.cpp - MachineIndependent/InfoSink.cpp - MachineIndependent/Initialize.cpp - MachineIndependent/IntermTraverse.cpp - MachineIndependent/Intermediate.cpp - MachineIndependent/ParseHelper.cpp - MachineIndependent/PoolAlloc.cpp - MachineIndependent/RemoveTree.cpp - MachineIndependent/Scan.cpp - MachineIndependent/ShaderLang.cpp - MachineIndependent/SymbolTable.cpp - MachineIndependent/Versions.cpp - MachineIndependent/intermOut.cpp - MachineIndependent/limits.cpp - MachineIndependent/linkValidate.cpp - MachineIndependent/parseConst.cpp - MachineIndependent/reflection.cpp - MachineIndependent/preprocessor/Pp.cpp - MachineIndependent/preprocessor/PpAtom.cpp - MachineIndependent/preprocessor/PpContext.cpp - MachineIndependent/preprocessor/PpMemory.cpp - MachineIndependent/preprocessor/PpScanner.cpp - MachineIndependent/preprocessor/PpSymbols.cpp - MachineIndependent/preprocessor/PpTokens.cpp - GenericCodeGen/CodeGen.cpp - GenericCodeGen/Link.cpp) - -set(HEADERS - Public/ShaderLang.h - Include/BaseTypes.h - Include/Common.h - Include/ConstantUnion.h - Include/InfoSink.h - Include/InitializeGlobals.h - Include/intermediate.h - Include/PoolAlloc.h - Include/ResourceLimits.h - Include/revision.h - Include/ShHandle.h - Include/Types.h - MachineIndependent/gl_types.h - MachineIndependent/Initialize.h - MachineIndependent/localintermediate.h - MachineIndependent/ParseHelper.h - MachineIndependent/reflection.h - MachineIndependent/RemoveTree.h - MachineIndependent/Scan.h - MachineIndependent/ScanContext.h - MachineIndependent/SymbolTable.h - MachineIndependent/unistd.h - MachineIndependent/Versions.h - MachineIndependent/preprocessor/PpContext.h - MachineIndependent/preprocessor/PpTokens.h) - -find_package(BISON) -if(NOT BISON_FOUND) - set(BISON_EXECUTABLE ../tools/bison.exe) - message("bison not found. Assuming it is at ${BISON_EXECUTABLE}") -endif() - -# Always use a custom command since our use of --defines isn't assumed by CMake's BISON_TARGET, -# which ends up causing the target to always be rebuilt. 
-add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h - COMMAND ${BISON_EXECUTABLE} --defines=${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h -t MachineIndependent/glslang.y -o ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp - MAIN_DEPENDENCY MachineIndependent/glslang.y - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) -set(BISON_GLSLParser_OUTPUT_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp) - -add_library(glslang STATIC ${BISON_GLSLParser_OUTPUT_SOURCE} ${SOURCES} ${HEADERS}) - -if(WIN32) - source_group("Public" REGULAR_EXPRESSION "Public/*") - source_group("MachineIndependent" REGULAR_EXPRESSION "MachineIndependent/[^/]*") - source_group("Generated Files" FILES ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h) - source_group("Include" REGULAR_EXPRESSION "Include/[^/]*") - source_group("GenericCodeGen" REGULAR_EXPRESSION "GenericCodeGen/*") - source_group("MachineIndependent\\Preprocessor" REGULAR_EXPRESSION "MachineIndependent/preprocessor/*") -endif(WIN32) - -install(TARGETS glslang - ARCHIVE DESTINATION lib) +cmake_minimum_required(VERSION 2.8) + +include_directories(MachineIndependent ../OGLCompilersDLL ${CMAKE_CURRENT_BINARY_DIR}) +if(WIN32) + add_subdirectory(OSDependent/Windows) + include_directories(${include_directories} OSDependent/Windows) +elseif(UNIX) + add_subdirectory(OSDependent/Linux) + include_directories(${include_directories} OSDependent/Linux) +else(WIN32) + message("unkown platform") +endif(WIN32) + +set(SOURCES + MachineIndependent/glslang.y + MachineIndependent/Constant.cpp + MachineIndependent/InfoSink.cpp + MachineIndependent/Initialize.cpp + MachineIndependent/IntermTraverse.cpp + MachineIndependent/Intermediate.cpp + MachineIndependent/ParseHelper.cpp + MachineIndependent/PoolAlloc.cpp + MachineIndependent/RemoveTree.cpp + MachineIndependent/Scan.cpp + MachineIndependent/ShaderLang.cpp + MachineIndependent/SymbolTable.cpp + MachineIndependent/Versions.cpp + MachineIndependent/intermOut.cpp + MachineIndependent/limits.cpp + MachineIndependent/linkValidate.cpp + MachineIndependent/parseConst.cpp + MachineIndependent/reflection.cpp + MachineIndependent/preprocessor/Pp.cpp + MachineIndependent/preprocessor/PpAtom.cpp + MachineIndependent/preprocessor/PpContext.cpp + MachineIndependent/preprocessor/PpMemory.cpp + MachineIndependent/preprocessor/PpScanner.cpp + MachineIndependent/preprocessor/PpSymbols.cpp + MachineIndependent/preprocessor/PpTokens.cpp + GenericCodeGen/CodeGen.cpp + GenericCodeGen/Link.cpp) + +set(HEADERS + Public/ShaderLang.h + Include/BaseTypes.h + Include/Common.h + Include/ConstantUnion.h + Include/InfoSink.h + Include/InitializeGlobals.h + Include/intermediate.h + Include/PoolAlloc.h + Include/ResourceLimits.h + Include/revision.h + Include/ShHandle.h + Include/Types.h + MachineIndependent/gl_types.h + MachineIndependent/Initialize.h + MachineIndependent/localintermediate.h + MachineIndependent/ParseHelper.h + MachineIndependent/reflection.h + MachineIndependent/RemoveTree.h + MachineIndependent/Scan.h + MachineIndependent/ScanContext.h + MachineIndependent/SymbolTable.h + MachineIndependent/unistd.h + MachineIndependent/Versions.h + MachineIndependent/preprocessor/PpContext.h + MachineIndependent/preprocessor/PpTokens.h) + +find_package(BISON) +if(NOT BISON_FOUND) + set(BISON_EXECUTABLE ../tools/bison.exe) + message("bison not found. 
Assuming it is at ${BISON_EXECUTABLE}") +endif() + +# Always use a custom command since our use of --defines isn't assumed by CMake's BISON_TARGET, +# which ends up causing the target to always be rebuilt. +add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h + COMMAND ${BISON_EXECUTABLE} --defines=${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h -t MachineIndependent/glslang.y -o ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp + MAIN_DEPENDENCY MachineIndependent/glslang.y + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) +set(BISON_GLSLParser_OUTPUT_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp) + +add_library(glslang STATIC ${BISON_GLSLParser_OUTPUT_SOURCE} ${SOURCES} ${HEADERS}) + +if(WIN32) + source_group("Public" REGULAR_EXPRESSION "Public/*") + source_group("MachineIndependent" REGULAR_EXPRESSION "MachineIndependent/[^/]*") + source_group("Generated Files" FILES ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h) + source_group("Include" REGULAR_EXPRESSION "Include/[^/]*") + source_group("GenericCodeGen" REGULAR_EXPRESSION "GenericCodeGen/*") + source_group("MachineIndependent\\Preprocessor" REGULAR_EXPRESSION "MachineIndependent/preprocessor/*") +endif(WIN32) + +install(TARGETS glslang + ARCHIVE DESTINATION lib) diff --git a/glslang/Include/revision.h b/glslang/Include/revision.h index b2eb463..00836fc 100644 --- a/glslang/Include/revision.h +++ b/glslang/Include/revision.h @@ -1,13 +1,13 @@ -// The file revision.h should be updated to the latest version, somehow, on -// check-in, if glslang has changed. -// -// revision.template is the source for revision.h when using SubWCRev as the -// method of updating revision.h. You don't have to do it this way, the -// requirement is only that revision.h gets updated. -// -// revision.h is under source control so that not all consumers of glslang -// source have to figure out how to create revision.h just to get a build -// going. However, if it is not updated, it can be a version behind. - -#define GLSLANG_REVISION "25512" -#define GLSLANG_DATE "2014/02/24 14:36:08" +// The file revision.h should be updated to the latest version, somehow, on +// check-in, if glslang has changed. +// +// revision.template is the source for revision.h when using SubWCRev as the +// method of updating revision.h. You don't have to do it this way, the +// requirement is only that revision.h gets updated. +// +// revision.h is under source control so that not all consumers of glslang +// source have to figure out how to create revision.h just to get a build +// going. However, if it is not updated, it can be a version behind. + +#define GLSLANG_REVISION "25512" +#define GLSLANG_DATE "2014/02/24 14:36:08" diff --git a/glslang/MachineIndependent/gl_types.h b/glslang/MachineIndependent/gl_types.h index 98a86d0..91603b5 100644 --- a/glslang/MachineIndependent/gl_types.h +++ b/glslang/MachineIndependent/gl_types.h @@ -1,150 +1,150 @@ -/* -** Copyright (c) 2013 The Khronos Group Inc. 
-** -** Permission is hereby granted, free of charge, to any person obtaining a -** copy of this software and/or associated documentation files (the -** "Materials"), to deal in the Materials without restriction, including -** without limitation the rights to use, copy, modify, merge, publish, -** distribute, sublicense, and/or sell copies of the Materials, and to -** permit persons to whom the Materials are furnished to do so, subject to -** the following conditions: -** -** The above copyright notice and this permission notice shall be included -** in all copies or substantial portions of the Materials. -** -** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. -*/ - -#define GL_FLOAT 0x1406 -#define GL_FLOAT_VEC2 0x8B50 -#define GL_FLOAT_VEC3 0x8B51 -#define GL_FLOAT_VEC4 0x8B52 - -#define GL_DOUBLE 0x140A -#define GL_DOUBLE_VEC2 0x8FFC -#define GL_DOUBLE_VEC3 0x8FFD -#define GL_DOUBLE_VEC4 0x8FFE - -#define GL_INT 0x1404 -#define GL_INT_VEC2 0x8B53 -#define GL_INT_VEC3 0x8B54 -#define GL_INT_VEC4 0x8B55 - -#define GL_UNSIGNED_INT 0x1405 -#define GL_UNSIGNED_INT_VEC2 0x8DC6 -#define GL_UNSIGNED_INT_VEC3 0x8DC7 -#define GL_UNSIGNED_INT_VEC4 0x8DC8 - -#define GL_BOOL 0x8B56 -#define GL_BOOL_VEC2 0x8B57 -#define GL_BOOL_VEC3 0x8B58 -#define GL_BOOL_VEC4 0x8B59 - -#define GL_FLOAT_MAT2 0x8B5A -#define GL_FLOAT_MAT3 0x8B5B -#define GL_FLOAT_MAT4 0x8B5C -#define GL_FLOAT_MAT2x3 0x8B65 -#define GL_FLOAT_MAT2x4 0x8B66 -#define GL_FLOAT_MAT3x2 0x8B67 -#define GL_FLOAT_MAT3x4 0x8B68 -#define GL_FLOAT_MAT4x2 0x8B69 -#define GL_FLOAT_MAT4x3 0x8B6A - -#define GL_DOUBLE_MAT2 0x8F46 -#define GL_DOUBLE_MAT3 0x8F47 -#define GL_DOUBLE_MAT4 0x8F48 -#define GL_DOUBLE_MAT2x3 0x8F49 -#define GL_DOUBLE_MAT2x4 0x8F4A -#define GL_DOUBLE_MAT3x2 0x8F4B -#define GL_DOUBLE_MAT3x4 0x8F4C -#define GL_DOUBLE_MAT4x2 0x8F4D -#define GL_DOUBLE_MAT4x3 0x8F4E - -#define GL_SAMPLER_1D 0x8B5D -#define GL_SAMPLER_2D 0x8B5E -#define GL_SAMPLER_3D 0x8B5F -#define GL_SAMPLER_CUBE 0x8B60 -#define GL_SAMPLER_BUFFER 0x8DC2 -#define GL_SAMPLER_1D_ARRAY 0x8DC0 -#define GL_SAMPLER_2D_ARRAY 0x8DC1 -#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3 -#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 -#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 -#define GL_SAMPLER_1D_SHADOW 0x8B61 -#define GL_SAMPLER_2D_SHADOW 0x8B62 -#define GL_SAMPLER_2D_RECT 0x8B63 -#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64 -#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 -#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B -#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C -#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D -#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C -#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D - -#define GL_INT_SAMPLER_1D 0x8DC9 -#define GL_INT_SAMPLER_2D 0x8DCA -#define GL_INT_SAMPLER_3D 0x8DCB -#define GL_INT_SAMPLER_CUBE 0x8DCC -#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE -#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF -#define GL_INT_SAMPLER_2D_RECT 0x8DCD -#define GL_INT_SAMPLER_BUFFER 0x8DD0 -#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 -#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C -#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E -#define 
GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E - -#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1 -#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 -#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 -#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 -#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6 -#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 -#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5 -#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 -#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D -#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F -#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F -#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A - -#define GL_IMAGE_1D 0x904C -#define GL_IMAGE_2D 0x904D -#define GL_IMAGE_3D 0x904E -#define GL_IMAGE_2D_RECT 0x904F -#define GL_IMAGE_CUBE 0x9050 -#define GL_IMAGE_BUFFER 0x9051 -#define GL_IMAGE_1D_ARRAY 0x9052 -#define GL_IMAGE_2D_ARRAY 0x9053 -#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 -#define GL_IMAGE_2D_MULTISAMPLE 0x9055 -#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056 -#define GL_INT_IMAGE_1D 0x9057 -#define GL_INT_IMAGE_2D 0x9058 -#define GL_INT_IMAGE_3D 0x9059 -#define GL_INT_IMAGE_2D_RECT 0x905A -#define GL_INT_IMAGE_CUBE 0x905B -#define GL_INT_IMAGE_BUFFER 0x905C -#define GL_INT_IMAGE_1D_ARRAY 0x905D -#define GL_INT_IMAGE_2D_ARRAY 0x905E -#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F -#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060 -#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061 -#define GL_UNSIGNED_INT_IMAGE_1D 0x9062 -#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 -#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 -#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065 -#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 -#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 -#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068 -#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 -#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A -#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B -#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C - -#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB +/* +** Copyright (c) 2013 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
+*/ + +#define GL_FLOAT 0x1406 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 + +#define GL_DOUBLE 0x140A +#define GL_DOUBLE_VEC2 0x8FFC +#define GL_DOUBLE_VEC3 0x8FFD +#define GL_DOUBLE_VEC4 0x8FFE + +#define GL_INT 0x1404 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 + +#define GL_UNSIGNED_INT 0x1405 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 + +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 + +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A + +#define GL_DOUBLE_MAT2 0x8F46 +#define GL_DOUBLE_MAT3 0x8F47 +#define GL_DOUBLE_MAT4 0x8F48 +#define GL_DOUBLE_MAT2x3 0x8F49 +#define GL_DOUBLE_MAT2x4 0x8F4A +#define GL_DOUBLE_MAT3x2 0x8F4B +#define GL_DOUBLE_MAT3x4 0x8F4C +#define GL_DOUBLE_MAT4x2 0x8F4D +#define GL_DOUBLE_MAT4x3 0x8F4E + +#define GL_SAMPLER_1D 0x8B5D +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_SAMPLER_BUFFER 0x8DC2 +#define GL_SAMPLER_1D_ARRAY 0x8DC0 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_SAMPLER_1D_SHADOW 0x8B61 +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_SAMPLER_2D_RECT 0x8B63 +#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D +#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D + +#define GL_INT_SAMPLER_1D 0x8DC9 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_INT_SAMPLER_2D_RECT 0x8DCD +#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E + +#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1 +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A + +#define GL_IMAGE_1D 0x904C +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_2D_RECT 0x904F +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_BUFFER 0x9051 +#define GL_IMAGE_1D_ARRAY 0x9052 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 +#define GL_IMAGE_2D_MULTISAMPLE 0x9055 +#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056 +#define GL_INT_IMAGE_1D 0x9057 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define 
GL_INT_IMAGE_2D_RECT 0x905A +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_BUFFER 0x905C +#define GL_INT_IMAGE_1D_ARRAY 0x905D +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F +#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060 +#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061 +#define GL_UNSIGNED_INT_IMAGE_1D 0x9062 +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 +#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C + +#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB diff --git a/glslang/MachineIndependent/limits.cpp b/glslang/MachineIndependent/limits.cpp index 4573d28..c543b6c 100644 --- a/glslang/MachineIndependent/limits.cpp +++ b/glslang/MachineIndependent/limits.cpp @@ -1,198 +1,198 @@ -// -//Copyright (C) 2013 LunarG, Inc. -// -//All rights reserved. -// -//Redistribution and use in source and binary forms, with or without -//modification, are permitted provided that the following conditions -//are met: -// -// Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// -// Neither the name of 3Dlabs Inc. Ltd. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -//POSSIBILITY OF SUCH DAMAGE. -// - -// -// Do sub tree walks for -// 1) inductive loop bodies to see if the inductive variable is modified -// 2) array-index expressions to see if they are "constant-index-expression" -// -// These are per Appendix A of ES 2.0: -// -// "Within the body of the loop, the loop index is not statically assigned to nor is it used as the -// argument to a function out or inout parameter." -// -// "The following are constant-index-expressions: -// - Constant expressions -// - Loop indices as defined in section 4 -// - Expressions composed of both of the above" -// -// N.B.: assuming the last rule excludes function calls -// - -#include "ParseHelper.h" - -namespace glslang { - -// -// The inductive loop-body traverser. -// -// Just look at things that might modify the loop index. 
-// - -class TInductiveTraverser : public TIntermTraverser { -public: - TInductiveTraverser(int id, TSymbolTable& st) - : loopId(id), symbolTable(st), bad(false) { } - - virtual bool visitBinary(TVisit, TIntermBinary* node); - virtual bool visitUnary(TVisit, TIntermUnary* node); - virtual bool visitAggregate(TVisit, TIntermAggregate* node); - - int loopId; // unique ID of the symbol that's the loop inductive variable - TSymbolTable& symbolTable; - bool bad; - TSourceLoc badLoc; - -protected: - TInductiveTraverser(TInductiveTraverser&); - TInductiveTraverser& operator=(TInductiveTraverser&); -}; - -// check binary operations for those modifying the loop index -bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node) -{ - if (node->modifiesState() && node->getLeft()->getAsSymbolNode() && - node->getLeft()->getAsSymbolNode()->getId() == loopId) { - bad = true; - badLoc = node->getLoc(); - } - - return true; -} - -// check unary operations for those modifying the loop index -bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) -{ - if (node->modifiesState() && node->getOperand()->getAsSymbolNode() && - node->getOperand()->getAsSymbolNode()->getId() == loopId) { - bad = true; - badLoc = node->getLoc(); - } - - return true; -} - -// check function calls for arguments modifying the loop index -bool TInductiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) -{ - if (node->getOp() == EOpFunctionCall) { - // see if an out or inout argument is the loop index - const TIntermSequence& args = node->getSequence(); - for (int i = 0; i < (int)args.size(); ++i) { - if (args[i]->getAsSymbolNode() && args[i]->getAsSymbolNode()->getId() == loopId) { - TSymbol* function = symbolTable.find(node->getName()); - const TType* type = (*function->getAsFunction())[i].type; - if (type->getQualifier().storage == EvqOut || - type->getQualifier().storage == EvqInOut) { - bad = true; - badLoc = node->getLoc(); - } - } - } - } - - return true; -} - -// -// External function to call for loop check. -// -void TParseContext::inductiveLoopBodyCheck(TIntermNode* body, int loopId, TSymbolTable& symbolTable) -{ - TInductiveTraverser it(loopId, symbolTable); - - if (! body) - return; - - body->traverse(&it); - - if (it.bad) - error(it.badLoc, "inductive loop index modified", "limitations", ""); -} - -// -// The "constant-index-expression" tranverser. -// -// Just look at things that can form an index. -// - -class TIndexTraverser : public TIntermTraverser { -public: - TIndexTraverser(const TIdSetType& ids) : inductiveLoopIds(ids), bad(false) { } - virtual void visitSymbol(TIntermSymbol* symbol); - virtual bool visitAggregate(TVisit, TIntermAggregate* node); - const TIdSetType& inductiveLoopIds; - bool bad; - TSourceLoc badLoc; - -protected: - TIndexTraverser(TIndexTraverser&); - TIndexTraverser& operator=(TIndexTraverser&); -}; - -// make sure symbols are inductive-loop indexes -void TIndexTraverser::visitSymbol(TIntermSymbol* symbol) -{ - if (inductiveLoopIds.find(symbol->getId()) == inductiveLoopIds.end()) { - bad = true; - badLoc = symbol->getLoc(); - } -} - -// check for function calls, assuming they are bad; spec. doesn't really say -bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) -{ - if (node->getOp() == EOpFunctionCall) { - bad = true; - badLoc = node->getLoc(); - } - - return true; -} - -// -// External function to call for loop check. 
-// -void TParseContext::constantIndexExpressionCheck(TIntermNode* index) -{ - TIndexTraverser it(inductiveLoopIds); - - index->traverse(&it); - - if (it.bad) - error(it.badLoc, "Non-constant-index-expression", "limitations", ""); -} - -} // end namespace glslang +// +//Copyright (C) 2013 LunarG, Inc. +// +//All rights reserved. +// +//Redistribution and use in source and binary forms, with or without +//modification, are permitted provided that the following conditions +//are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +//POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do sub tree walks for +// 1) inductive loop bodies to see if the inductive variable is modified +// 2) array-index expressions to see if they are "constant-index-expression" +// +// These are per Appendix A of ES 2.0: +// +// "Within the body of the loop, the loop index is not statically assigned to nor is it used as the +// argument to a function out or inout parameter." +// +// "The following are constant-index-expressions: +// - Constant expressions +// - Loop indices as defined in section 4 +// - Expressions composed of both of the above" +// +// N.B.: assuming the last rule excludes function calls +// + +#include "ParseHelper.h" + +namespace glslang { + +// +// The inductive loop-body traverser. +// +// Just look at things that might modify the loop index. 
+// + +class TInductiveTraverser : public TIntermTraverser { +public: + TInductiveTraverser(int id, TSymbolTable& st) + : loopId(id), symbolTable(st), bad(false) { } + + virtual bool visitBinary(TVisit, TIntermBinary* node); + virtual bool visitUnary(TVisit, TIntermUnary* node); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + + int loopId; // unique ID of the symbol that's the loop inductive variable + TSymbolTable& symbolTable; + bool bad; + TSourceLoc badLoc; + +protected: + TInductiveTraverser(TInductiveTraverser&); + TInductiveTraverser& operator=(TInductiveTraverser&); +}; + +// check binary operations for those modifying the loop index +bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node) +{ + if (node->modifiesState() && node->getLeft()->getAsSymbolNode() && + node->getLeft()->getAsSymbolNode()->getId() == loopId) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// check unary operations for those modifying the loop index +bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) +{ + if (node->modifiesState() && node->getOperand()->getAsSymbolNode() && + node->getOperand()->getAsSymbolNode()->getId() == loopId) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// check function calls for arguments modifying the loop index +bool TInductiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (node->getOp() == EOpFunctionCall) { + // see if an out or inout argument is the loop index + const TIntermSequence& args = node->getSequence(); + for (int i = 0; i < (int)args.size(); ++i) { + if (args[i]->getAsSymbolNode() && args[i]->getAsSymbolNode()->getId() == loopId) { + TSymbol* function = symbolTable.find(node->getName()); + const TType* type = (*function->getAsFunction())[i].type; + if (type->getQualifier().storage == EvqOut || + type->getQualifier().storage == EvqInOut) { + bad = true; + badLoc = node->getLoc(); + } + } + } + } + + return true; +} + +// +// External function to call for loop check. +// +void TParseContext::inductiveLoopBodyCheck(TIntermNode* body, int loopId, TSymbolTable& symbolTable) +{ + TInductiveTraverser it(loopId, symbolTable); + + if (! body) + return; + + body->traverse(&it); + + if (it.bad) + error(it.badLoc, "inductive loop index modified", "limitations", ""); +} + +// +// The "constant-index-expression" tranverser. +// +// Just look at things that can form an index. +// + +class TIndexTraverser : public TIntermTraverser { +public: + TIndexTraverser(const TIdSetType& ids) : inductiveLoopIds(ids), bad(false) { } + virtual void visitSymbol(TIntermSymbol* symbol); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + const TIdSetType& inductiveLoopIds; + bool bad; + TSourceLoc badLoc; + +protected: + TIndexTraverser(TIndexTraverser&); + TIndexTraverser& operator=(TIndexTraverser&); +}; + +// make sure symbols are inductive-loop indexes +void TIndexTraverser::visitSymbol(TIntermSymbol* symbol) +{ + if (inductiveLoopIds.find(symbol->getId()) == inductiveLoopIds.end()) { + bad = true; + badLoc = symbol->getLoc(); + } +} + +// check for function calls, assuming they are bad; spec. doesn't really say +bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (node->getOp() == EOpFunctionCall) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// +// External function to call for loop check. 
+// +void TParseContext::constantIndexExpressionCheck(TIntermNode* index) +{ + TIndexTraverser it(inductiveLoopIds); + + index->traverse(&it); + + if (it.bad) + error(it.badLoc, "Non-constant-index-expression", "limitations", ""); +} + +} // end namespace glslang diff --git a/glslang/MachineIndependent/linkValidate.cpp b/glslang/MachineIndependent/linkValidate.cpp index 7e755b4..eabf999 100644 --- a/glslang/MachineIndependent/linkValidate.cpp +++ b/glslang/MachineIndependent/linkValidate.cpp @@ -1,984 +1,984 @@ -// -//Copyright (C) 2013 LunarG, Inc. -// -//All rights reserved. -// -//Redistribution and use in source and binary forms, with or without -//modification, are permitted provided that the following conditions -//are met: -// -// Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// -// Neither the name of 3Dlabs Inc. Ltd. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -//POSSIBILITY OF SUCH DAMAGE. -// - -// -// Do link-time merging and validation of intermediate representations. -// -// Basic model is that during compilation, each compilation unit (shader) is -// compiled into one TIntermediate instance. Then, at link time, multiple -// units for the same stage can be merged together, which can generate errors. -// Then, after all merging, a single instance of TIntermediate represents -// the whole stage. A final error check can be done on the resulting stage, -// even if no merging was done (i.e., the stage was only one compilation unit). -// - -#include "localintermediate.h" -#include "../Include/InfoSink.h" - -namespace glslang { - -// -// Link-time error emitter. -// -void TIntermediate::error(TInfoSink& infoSink, const char* message) -{ - infoSink.info.prefix(EPrefixError); - infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n"; - - ++numErrors; -} - -// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block -// name must have the exact same set of members qualified with offset and their integral-constant -// expression values must be the same, or a link-time error results." 
- -// -// Merge the information from 'unit' into 'this' -// -void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit) -{ - numMains += unit.numMains; - numErrors += unit.numErrors; - callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end()); - - if ((profile != EEsProfile && unit.profile == EEsProfile) || - (profile == EEsProfile && unit.profile != EEsProfile)) - error(infoSink, "Cannot mix ES profile with non-ES profile shaders\n"); - - if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger) - error(infoSink, "gl_FragCoord redeclarations must match across shaders\n"); - - if (! earlyFragmentTests) - earlyFragmentTests = unit.earlyFragmentTests; - - if (depthLayout == EldNone) - depthLayout = unit.depthLayout; - else if (depthLayout != unit.depthLayout) - error(infoSink, "Contradictory depth layouts"); - - if (inputPrimitive == ElgNone) - inputPrimitive = unit.inputPrimitive; - else if (inputPrimitive != unit.inputPrimitive) - error(infoSink, "Contradictory input layout primitives"); - - if (outputPrimitive == ElgNone) - outputPrimitive = unit.outputPrimitive; - else if (outputPrimitive != unit.outputPrimitive) - error(infoSink, "Contradictory output layout primitives"); - - if (vertices == 0) - vertices = unit.vertices; - else if (vertices != unit.vertices) { - if (language == EShLangGeometry) - error(infoSink, "Contradictory layout max_vertices values"); - else if (language == EShLangTessControl) - error(infoSink, "Contradictory layout vertices values"); - else - assert(0); - } - - if (vertexSpacing == EvsNone) - vertexSpacing = unit.vertexSpacing; - else if (vertexSpacing != unit.vertexSpacing) - error(infoSink, "Contradictory input vertex spacing"); - - if (vertexOrder == EvoNone) - vertexOrder = unit.vertexOrder; - else if (vertexOrder != unit.vertexOrder) - error(infoSink, "Contradictory triangle ordering"); - - if (unit.pointMode) - pointMode = true; - - for (int i = 0; i < 3; ++i) { - if (localSize[i] > 1) - localSize[i] = unit.localSize[i]; - else if (localSize[i] != unit.localSize[i]) - error(infoSink, "Contradictory local size"); - } - - if (unit.xfbMode) - xfbMode = true; - for (size_t b = 0; b < xfbBuffers.size(); ++b) { - if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) - xfbBuffers[b].stride = unit.xfbBuffers[b].stride; - else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride) - error(infoSink, "Contradictory xfb_stride"); - xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride); - if (unit.xfbBuffers[b].containsDouble) - xfbBuffers[b].containsDouble = true; - // TODO: 4.4 link: enhanced layouts: compare ranges - } - - if (unit.treeRoot == 0) - return; - - if (treeRoot == 0) { - treeRoot = unit.treeRoot; - version = unit.version; - requestedExtensions = unit.requestedExtensions; - return; - } - - // Getting this far means we have two existing trees to merge... 
- - version = std::max(version, unit.version); - requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end()); - - // Get the top-level globals of each unit - TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); - TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence(); - - // Get the linker-object lists - TIntermSequence& linkerObjects = findLinkerObjects(); - TIntermSequence& unitLinkerObjects = unit.findLinkerObjects(); - - mergeBodies(infoSink, globals, unitGlobals); - mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects); - - ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end()); -} - -// -// Merge the function bodies and global-level initializers from unitGlobals into globals. -// Will error check duplication of function bodies for the same signature. -// -void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals) -{ - // TODO: link-time performance: Processing in alphabetical order will be faster - - // Error check the global objects, not including the linker objects - for (unsigned int child = 0; child < globals.size() - 1; ++child) { - for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) { - TIntermAggregate* body = globals[child]->getAsAggregate(); - TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate(); - if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) { - error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:"); - infoSink.info << " " << globals[child]->getAsAggregate()->getName() << "\n"; - } - } - } - - // Merge the global objects, just in front of the linker objects - globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1); -} - -// -// Merge the linker objects from unitLinkerObjects into linkerObjects. -// Duplication is expected and filtered out, but contradictions are an error. -// -void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects) -{ - // Error check and merge the linker objects (duplicates should not be created) - std::size_t initialNumLinkerObjects = linkerObjects.size(); - for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) { - bool merge = true; - for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) { - TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode(); - TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode(); - assert(symbol && unitSymbol); - if (symbol->getName() == unitSymbol->getName()) { - // filter out copy - merge = false; - - // but if one has an initializer and the other does not, update - // the initializer - if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty()) - symbol->setConstArray(unitSymbol->getConstArray()); - - // Similarly for binding - if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding()) - symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding; - - // Update implicit array sizes - mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType()); - - // Check for consistent types/qualification/initializers etc. 
- mergeErrorCheck(infoSink, *symbol, *unitSymbol, false); - } - } - if (merge) - linkerObjects.push_back(unitLinkerObjects[unitLinkObj]); - } -} - -// TODO 4.5 link functionality: cull distance array size checking - -// Recursively merge the implicit array sizes through the objects' respective type trees. -void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType) -{ - if (type.isImplicitlySizedArray() && unitType.isArray()) { - int newImplicitArraySize = unitType.getArraySize(); - if (newImplicitArraySize == 0) - newImplicitArraySize = unitType.getImplicitArraySize(); - if (newImplicitArraySize > type.getImplicitArraySize ()) - type.setImplicitArraySize(newImplicitArraySize); - } - - // Type mismatches are caught and reported after this, just be careful for now. - if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size()) - return; - - for (int i = 0; i < (int)type.getStruct()->size(); ++i) - mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type); -} - -// -// Compare two global objects from two compilation units and see if they match -// well enough. Rules can be different for intra- vs. cross-stage matching. -// -// This function only does one of intra- or cross-stage matching per call. -// -void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage) -{ - bool writeTypeComparison = false; - - // Types have to match - if (symbol.getType() != unitSymbol.getType()) { - error(infoSink, "Types must match:"); - writeTypeComparison = true; - } - - // Qualifiers have to (almost) match - - // Storage... - if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) { - error(infoSink, "Storage qualifiers must match:"); - writeTypeComparison = true; - } - - // Precision... - if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) { - error(infoSink, "Precision qualifiers must match:"); - writeTypeComparison = true; - } - - // Invariance... - if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) { - error(infoSink, "Presence of invariant qualifier must match:"); - writeTypeComparison = true; - } - - // Auxiliary and interpolation... - if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid || - symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth || - symbol.getQualifier().flat != unitSymbol.getQualifier().flat || - symbol.getQualifier().sample != unitSymbol.getQualifier().sample || - symbol.getQualifier().patch != unitSymbol.getQualifier().patch || - symbol.getQualifier().nopersp != unitSymbol.getQualifier().nopersp) { - error(infoSink, "Interpolation and auxiliary storage qualifiers must match:"); - writeTypeComparison = true; - } - - // Memory... - if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent || - symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil || - symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict || - symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly || - symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) { - error(infoSink, "Memory qualifiers must match:"); - writeTypeComparison = true; - } - - // Layouts... - // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec - // requires separate user-supplied offset from actual computed offset, but - // current implementation only has one offset. 
- if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix || - symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking || - symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation || - symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent || - symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex || - symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding || - (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) { - error(infoSink, "Layout qualification must match:"); - writeTypeComparison = true; - } - - // Initializers have to match, if both are present, and if we don't already know the types don't match - if (! writeTypeComparison) { - if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) { - if (symbol.getConstArray() != unitSymbol.getConstArray()) { - error(infoSink, "Initializers must match:"); - infoSink.info << " " << symbol.getName() << "\n"; - } - } - } - - if (writeTypeComparison) - infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" << - unitSymbol.getType().getCompleteString() << "\"\n"; -} - -// -// Do final link-time error checking of a complete (merged) intermediate representation. -// (Much error checking was done during merging). -// -// Also, lock in defaults of things not set, including array sizes. -// -void TIntermediate::finalCheck(TInfoSink& infoSink) -{ - if (numMains < 1) - error(infoSink, "Missing entry point: Each stage requires one \"void main()\" entry point"); - - // recursion checking - checkCallGraphCycles(infoSink); - - // overlap/alias/missing I/O, etc. - inOutLocationCheck(infoSink); - - if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex")) - error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)"); - - if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData"))) - error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs"); - if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData")) - error(infoSink, "Cannot use both gl_FragColor and gl_FragData"); - - for (size_t b = 0; b < xfbBuffers.size(); ++b) { - if (xfbBuffers[b].containsDouble) - RoundToPow2(xfbBuffers[b].implicitStride, 8); - - // "It is a compile-time or link-time error to have - // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or - // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a - // compile-time or link-time error to have different values specified for the stride for the same buffer." 
- if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) { - error(infoSink, "xfb_stride is too small to hold all buffer entries:"); - infoSink.info.prefix(EPrefixError); - infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n"; - } - if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) - xfbBuffers[b].stride = xfbBuffers[b].implicitStride; - - // "If the buffer is capturing any - // outputs with double-precision components, the stride must be a multiple of 8, otherwise it must be a - // multiple of 4, or a compile-time or link-time error results." - if (xfbBuffers[b].containsDouble && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) { - error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double:"); - infoSink.info.prefix(EPrefixError); - infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; - } else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) { - error(infoSink, "xfb_stride must be multiple of 4:"); - infoSink.info.prefix(EPrefixError); - infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; - } - - // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the - // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents." - if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) { - error(infoSink, "xfb_stride is too large:"); - infoSink.info.prefix(EPrefixError); - infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n"; - } - } - - switch (language) { - case EShLangVertex: - break; - case EShLangTessControl: - if (vertices == 0) - error(infoSink, "At least one shader must specify an output layout(vertices=...)"); - break; - case EShLangTessEvaluation: - if (inputPrimitive == ElgNone) - error(infoSink, "At least one shader must specify an input layout primitive"); - if (vertexSpacing == EvsNone) - vertexSpacing = EvsEqual; - if (vertexOrder == EvoNone) - vertexOrder = EvoCcw; - break; - case EShLangGeometry: - if (inputPrimitive == ElgNone) - error(infoSink, "At least one shader must specify an input layout primitive"); - if (outputPrimitive == ElgNone) - error(infoSink, "At least one shader must specify an output layout primitive"); - if (vertices == 0) - error(infoSink, "At least one shader must specify a layout(max_vertices = value)"); - break; - case EShLangFragment: - break; - case EShLangCompute: - break; - default: - error(infoSink, "Unknown Stage."); - break; - } - - // Process the tree for any node-specific work. - class TFinalLinkTraverser : public TIntermTraverser { - public: - TFinalLinkTraverser() { } - virtual ~TFinalLinkTraverser() { } - - virtual void visitSymbol(TIntermSymbol* symbol) - { - // Implicitly size arrays. - symbol->getWritableType().adoptImplicitArraySizes(); - } - } finalLinkTraverser; - - treeRoot->traverse(&finalLinkTraverser); -} - -// -// See if the call graph contains any static recursion, which is disallowed -// by the specification. -// -void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink) -{ - // Reset everything, once. 
- for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { - call->visited = false; - call->currentPath = false; - call->errorGiven = false; - } - - // - // Loop, looking for a new connected subgraph. One subgraph is handled per loop iteration. - // - - TCall* newRoot; - do { - // See if we have unvisited parts of the graph. - newRoot = 0; - for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { - if (! call->visited) { - newRoot = &(*call); - break; - } - } - - // If not, we are done. - if (! newRoot) - break; - - // Otherwise, we found a new subgraph, process it: - // See what all can be reached by this new root, and if any of - // that is recursive. This is done by depth-first traversals, seeing - // if a new call is found that was already in the currentPath (a back edge), - // thereby detecting recursion. - std::list stack; - newRoot->currentPath = true; // currentPath will be true iff it is on the stack - stack.push_back(newRoot); - while (! stack.empty()) { - // get a caller - TCall* call = stack.back(); - - // Add to the stack just one callee. - // This algorithm always terminates, because only !visited and !currentPath causes a push - // and all pushes change currentPath to true, and all pops change visited to true. - TGraph::iterator child = callGraph.begin(); - for (; child != callGraph.end(); ++child) { - - // If we already visited this node, its whole subgraph has already been processed, so skip it. - if (child->visited) - continue; - - if (call->callee == child->caller) { - if (child->currentPath) { - // Then, we found a back edge - if (! child->errorGiven) { - error(infoSink, "Recursion detected:"); - infoSink.info << " " << call->callee << " calling " << child->callee << "\n"; - child->errorGiven = true; - recursive = true; - } - } else { - child->currentPath = true; - stack.push_back(&(*child)); - break; - } - } - } - if (child == callGraph.end()) { - // no more callees, we bottomed out, never look at this node again - stack.back()->currentPath = false; - stack.back()->visited = true; - stack.pop_back(); - } - } // end while, meaning nothing left to process in this subtree - - } while (newRoot); // redundant loop check; should always exit via the 'break' above -} - -// -// Satisfy rules for location qualifiers on inputs and outputs -// -void TIntermediate::inOutLocationCheck(TInfoSink& infoSink) -{ - // ES 3.0 requires all outputs to have location qualifiers if there is more than one output - bool fragOutHasLocation = false; - bool fragOutWithNoLocation = false; - int numFragOut = 0; - - // TODO: linker functionality: location collision checking - - TIntermSequence& linkObjects = findLinkerObjects(); - for (size_t i = 0; i < linkObjects.size(); ++i) { - const TType& type = linkObjects[i]->getAsTyped()->getType(); - const TQualifier& qualifier = type.getQualifier(); - if (language == EShLangFragment) { - if (qualifier.storage == EvqVaryingOut) { - ++numFragOut; - if (qualifier.hasAnyLocation()) - fragOutHasLocation = true; - else - fragOutWithNoLocation = true; - } - } - } - - if (profile == EEsProfile) { - if (numFragOut > 1 && fragOutWithNoLocation) - error(infoSink, "when more than one fragment shader output, all must have location qualifiers"); - } -} - -TIntermSequence& TIntermediate::findLinkerObjects() const -{ - // Get the top-level globals - TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); - - // Get the last member of the sequences, expected to be the linker-object lists - 
assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects); - - return globals.back()->getAsAggregate()->getSequence(); -} - -// See if a variable was both a user-declared output and used. -// Note: the spec discusses writing to one, but this looks at read or write, which -// is more useful, and perhaps the spec should be changed to reflect that. -bool TIntermediate::userOutputUsed() const -{ - const TIntermSequence& linkerObjects = findLinkerObjects(); - - bool found = false; - for (size_t i = 0; i < linkerObjects.size(); ++i) { - const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode(); - if (symbolNode.getQualifier().storage == EvqVaryingOut && - symbolNode.getName().compare(0, 3, "gl_") != 0 && - inIoAccessed(symbolNode.getName())) { - found = true; - break; - } - } - - return found; -} - -// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions -// as the accumulation is done. -// -// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. -// -// typeCollision is set to true if there is no direct collision, but the types in the same location -// are different. -// -int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision) -{ - typeCollision = false; - - int set; - if (qualifier.isPipeInput()) - set = 0; - else if (qualifier.isPipeOutput()) - set = 1; - else if (qualifier.storage == EvqUniform) - set = 2; - else if (qualifier.storage == EvqBuffer) - set = 3; - else - return -1; - - int size; - if (qualifier.isUniformOrBuffer()) { - if (type.isArray()) - size = type.getArraySize(); - else - size = 1; - } else { - // Strip off the outer array dimension for those having an extra one. - if (type.isArray() && qualifier.isArrayedIo(language)) { - TType elementType(type, 0); - size = computeTypeLocationSize(elementType); - } else - size = computeTypeLocationSize(type); - } - - TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1); - TRange componentRange(0, 3); - if (qualifier.hasComponent()) { - componentRange.start = qualifier.layoutComponent; - componentRange.last = componentRange.start + type.getVectorSize() - 1; - } - TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0); - - // check for collisions, except for vertex inputs on desktop - if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput())) { - for (size_t r = 0; r < usedIo[set].size(); ++r) { - if (range.overlap(usedIo[set][r])) { - // there is a collision; pick one - return std::max(locationRange.start, usedIo[set][r].location.start); - } else if (locationRange.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) { - // aliased-type mismatch - typeCollision = true; - return std::max(locationRange.start, usedIo[set][r].location.start); - } - } - } - - usedIo[set].push_back(range); - - return -1; // no collision -} - -// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions -// as the accumulation is done. -// -// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
-// -int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets) -{ - TRange bindingRange(binding, binding); - TRange offsetRange(offset, offset + numOffsets - 1); - TOffsetRange range(bindingRange, offsetRange); - - // check for collisions, except for vertex inputs on desktop - for (size_t r = 0; r < usedAtomics.size(); ++r) { - if (range.overlap(usedAtomics[r])) { - // there is a collision; pick one - return std::max(offset, usedAtomics[r].offset.start); - } - } - - usedAtomics.push_back(range); - - return -1; // no collision -} - -// Recursively figure out how many locations are used up by an input or output type. -// Return the size of type, as measured by "locations". -int TIntermediate::computeTypeLocationSize(const TType& type) const -{ - // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n - // consecutive locations..." - if (type.isArray()) { - TType elementType(type, 0); - if (type.isImplicitlySizedArray()) { - // TODO: are there valid cases of having an implicitly-sized array with a location? If so, running this code too early. - return computeTypeLocationSize(elementType); - } else - return type.getArraySize() * computeTypeLocationSize(elementType); - } - - // "The locations consumed by block and structure members are determined by applying the rules above - // recursively..." - if (type.isStruct()) { - int size = 0; - for (int member = 0; member < (int)type.getStruct()->size(); ++member) { - TType memberType(type, member); - size += computeTypeLocationSize(memberType); - } - return size; - } - - // ES: "If a shader input is any scalar or vector type, it will consume a single location." - - // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex - // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while - // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will - // consume only a single location, in all stages." - if (type.isScalar()) - return 1; - if (type.isVector()) { - if (language == EShLangVertex && type.getQualifier().isPipeInput()) - return 1; - if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2) - return 2; - else - return 1; - } - - // "If the declared input is an n x m single- or double-precision matrix, ... - // The number of locations assigned for each matrix will be the same as - // for an n-element array of m-component vectors..." - if (type.isMatrix()) { - TType columnType(type, 0); - return type.getMatrixCols() * computeTypeLocationSize(columnType); - } - - assert(0); - return 1; -} - -// Accumulate xfb buffer ranges and check for collisions as the accumulation is done. -// -// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
-// -int TIntermediate::addXfbBufferOffset(const TType& type) -{ - const TQualifier& qualifier = type.getQualifier(); - - assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer()); - TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer]; - - // compute the range - unsigned int size = computeTypeXfbSize(type, buffer.containsDouble); - buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size); - TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1); - - // check for collisions - for (size_t r = 0; r < buffer.ranges.size(); ++r) { - if (range.overlap(buffer.ranges[r])) { - // there is a collision; pick an example to return - return std::max(range.start, buffer.ranges[r].start); - } - } - - buffer.ranges.push_back(range); - - return -1; // no collision -} - -// Recursively figure out how many bytes of xfb buffer are used by the given type. -// Return the size of type, in bytes. -// Sets containsDouble to true if the type contains a double. -// N.B. Caller must set containsDouble to false before calling. -unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& containsDouble) const -{ - // "...if applied to an aggregate containing a double, the offset must also be a multiple of 8, - // and the space taken in the buffer will be a multiple of 8. - // ...within the qualified entity, subsequent components are each - // assigned, in order, to the next available offset aligned to a multiple of - // that component's size. Aggregate types are flattened down to the component - // level to get this sequence of components." - - if (type.isArray()) { - assert(type.isExplicitlySizedArray()); - TType elementType(type, 0); - return type.getArraySize() * computeTypeXfbSize(elementType, containsDouble); - } - - if (type.isStruct()) { - unsigned int size = 0; - bool structContainsDouble = false; - for (int member = 0; member < (int)type.getStruct()->size(); ++member) { - TType memberType(type, member); - // "... if applied to - // an aggregate containing a double, the offset must also be a multiple of 8, - // and the space taken in the buffer will be a multiple of 8." - bool memberContainsDouble = false; - int memberSize = computeTypeXfbSize(memberType, memberContainsDouble); - if (memberContainsDouble) { - structContainsDouble = true; - RoundToPow2(size, 8); - } - size += memberSize; - } - - if (structContainsDouble) { - containsDouble = true; - RoundToPow2(size, 8); - } - return size; - } - - int numComponents; - if (type.isScalar()) - numComponents = 1; - else if (type.isVector()) - numComponents = type.getVectorSize(); - else if (type.isMatrix()) - numComponents = type.getMatrixCols() * type.getMatrixRows(); - else { - assert(0); - numComponents = 1; - } - - if (type.getBasicType() == EbtDouble) { - containsDouble = true; - return 8 * numComponents; - } else - return 4 * numComponents; -} - -const int baseAlignmentVec4Std140 = 16; - -// Return the size and alignment of a scalar. -// The size is returned in the 'size' parameter -// Return value is the alignment of the type. -int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size) -{ - switch (type.getBasicType()) { - case EbtDouble: size = 8; return 8; - default: size = 4; return 4; - } -} - -// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout -// Operates recursively. -// -// If std140 is true, it does the rounding up to vec4 size required by std140, -// otherwise it does not, yielding std430 rules. 
-// -// The size is returned in the 'size' parameter -// Return value is the alignment of the type. -int TIntermediate::getBaseAlignment(const TType& type, int& size, bool std140) -{ - int alignment; - - // When using the std140 storage layout, structures will be laid out in buffer - // storage with its members stored in monotonically increasing order based on their - // location in the declaration. A structure and each structure member have a base - // offset and a base alignment, from which an aligned offset is computed by rounding - // the base offset up to a multiple of the base alignment. The base offset of the first - // member of a structure is taken from the aligned offset of the structure itself. The - // base offset of all other structure members is derived by taking the offset of the - // last basic machine unit consumed by the previous member and adding one. Each - // structure member is stored in memory at its aligned offset. The members of a top- - // level uniform block are laid out in buffer storage by treating the uniform block as - // a structure with a base offset of zero. - // - // 1. If the member is a scalar consuming N basic machine units, the base alignment is N. - // - // 2. If the member is a two- or four-component vector with components consuming N basic - // machine units, the base alignment is 2N or 4N, respectively. - // - // 3. If the member is a three-component vector with components consuming N - // basic machine units, the base alignment is 4N. - // - // 4. If the member is an array of scalars or vectors, the base alignment and array - // stride are set to match the base alignment of a single array element, according - // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The - // array may have padding at the end; the base offset of the member following - // the array is rounded up to the next multiple of the base alignment. - // - // 5. If the member is a column-major matrix with C columns and R rows, the - // matrix is stored identically to an array of C column vectors with R - // components each, according to rule (4). - // - // 6. If the member is an array of S column-major matrices with C columns and - // R rows, the matrix is stored identically to a row of S  C column vectors - // with R components each, according to rule (4). - // - // 7. If the member is a row-major matrix with C columns and R rows, the matrix - // is stored identically to an array of R row vectors with C components each, - // according to rule (4). - // - // 8. If the member is an array of S row-major matrices with C columns and R - // rows, the matrix is stored identically to a row of S  R row vectors with C - // components each, according to rule (4). - // - // 9. If the member is a structure, the base alignment of the structure is N , where - // N is the largest base alignment value of any of its members, and rounded - // up to the base alignment of a vec4. The individual members of this substructure - // are then assigned offsets by applying this set of rules recursively, - // where the base offset of the first member of the sub-structure is equal to the - // aligned offset of the structure. The structure may have padding at the end; - // the base offset of the member following the sub-structure is rounded up to - // the next multiple of the base alignment of the structure. - // - // 10. If the member is an array of S structures, the S elements of the array are laid - // out in order, according to rule (9). 
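(Editorial aside, not part of the patch; the same comment block reappears verbatim in the '+' half of this hunk.) The offset mechanics described above are easiest to see with one worked example. The following standalone C++ sketch walks the members of a hypothetical block `float a; vec3 b; float c;` through rules (1), (3), and (9), assuming 4-byte floats; `alignUp` is an illustrative stand-in for the RoundToPow2 helper used by the implementation.

    // Worked std140 example for:  uniform Example { float a; vec3 b; float c; };
    // An editorial sketch, not glslang code.
    #include <cassert>

    // Round 'offset' up to the next multiple of the power-of-two 'alignment'.
    static int alignUp(int offset, int alignment) { return (offset + alignment - 1) & ~(alignment - 1); }

    int main()
    {
        int offset = 0;

        // rule 1: float a -> base alignment 4, size 4
        int a = alignUp(offset, 4);   offset = a + 4;    // a sits at offset 0

        // rule 3: vec3 b -> base alignment 16 (4N), size 12
        int b = alignUp(offset, 16);  offset = b + 12;   // b is pushed to offset 16

        // rule 1 again: float c may pack into the padding after the vec3
        int c = alignUp(offset, 4);   offset = c + 4;    // c lands at offset 28

        // rule 9: the structure's base alignment is its largest member alignment (here 16);
        // a member placed after this structure would start at the next multiple of 16.
        int paddedSize = alignUp(offset, 16);

        assert(a == 0 && b == 16 && c == 28 && paddedSize == 32);
        return 0;
    }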
- - // rules 4, 6, and 8 - if (type.isArray()) { - TType derefType(type, 0); - alignment = getBaseAlignment(derefType, size, std140); - if (std140) - alignment = std::max(baseAlignmentVec4Std140, alignment); - RoundToPow2(size, alignment); - size *= type.getArraySize(); - return alignment; - } - - // rule 9 - if (type.getBasicType() == EbtStruct) { - const TTypeList& memberList = *type.getStruct(); - - size = 0; - int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0; - for (size_t m = 0; m < memberList.size(); ++m) { - int memberSize; - int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, std140); - maxAlignment = std::max(maxAlignment, memberAlignment); - RoundToPow2(size, memberAlignment); - size += memberSize; - } - - return maxAlignment; - } - - // rule 1 - if (type.isScalar()) - return getBaseAlignmentScalar(type, size); - - // rules 2 and 3 - if (type.isVector()) { - int scalarAlign = getBaseAlignmentScalar(type, size); - switch (type.getVectorSize()) { - case 2: - size *= 2; - return 2 * scalarAlign; - default: - size *= type.getVectorSize(); - return 4 * scalarAlign; - } - } - - // rules 5 and 7 - if (type.isMatrix()) { - // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows - TType derefType(type, 0, type.getQualifier().layoutMatrix == ElmRowMajor); - - alignment = getBaseAlignment(derefType, size, std140); - if (std140) - alignment = std::max(baseAlignmentVec4Std140, alignment); - RoundToPow2(size, alignment); - if (type.getQualifier().layoutMatrix == ElmRowMajor) - size *= type.getMatrixRows(); - else - size *= type.getMatrixCols(); - - return alignment; - } - - assert(0); // all cases should be covered above - size = baseAlignmentVec4Std140; - return baseAlignmentVec4Std140; -} - -} // end namespace glslang +// +//Copyright (C) 2013 LunarG, Inc. +// +//All rights reserved. +// +//Redistribution and use in source and binary forms, with or without +//modification, are permitted provided that the following conditions +//are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +//POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do link-time merging and validation of intermediate representations. 
+// +// Basic model is that during compilation, each compilation unit (shader) is +// compiled into one TIntermediate instance. Then, at link time, multiple +// units for the same stage can be merged together, which can generate errors. +// Then, after all merging, a single instance of TIntermediate represents +// the whole stage. A final error check can be done on the resulting stage, +// even if no merging was done (i.e., the stage was only one compilation unit). +// + +#include "localintermediate.h" +#include "../Include/InfoSink.h" + +namespace glslang { + +// +// Link-time error emitter. +// +void TIntermediate::error(TInfoSink& infoSink, const char* message) +{ + infoSink.info.prefix(EPrefixError); + infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n"; + + ++numErrors; +} + +// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block +// name must have the exact same set of members qualified with offset and their integral-constant +// expression values must be the same, or a link-time error results." + +// +// Merge the information from 'unit' into 'this' +// +void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit) +{ + numMains += unit.numMains; + numErrors += unit.numErrors; + callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end()); + + if ((profile != EEsProfile && unit.profile == EEsProfile) || + (profile == EEsProfile && unit.profile != EEsProfile)) + error(infoSink, "Cannot mix ES profile with non-ES profile shaders\n"); + + if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger) + error(infoSink, "gl_FragCoord redeclarations must match across shaders\n"); + + if (! earlyFragmentTests) + earlyFragmentTests = unit.earlyFragmentTests; + + if (depthLayout == EldNone) + depthLayout = unit.depthLayout; + else if (depthLayout != unit.depthLayout) + error(infoSink, "Contradictory depth layouts"); + + if (inputPrimitive == ElgNone) + inputPrimitive = unit.inputPrimitive; + else if (inputPrimitive != unit.inputPrimitive) + error(infoSink, "Contradictory input layout primitives"); + + if (outputPrimitive == ElgNone) + outputPrimitive = unit.outputPrimitive; + else if (outputPrimitive != unit.outputPrimitive) + error(infoSink, "Contradictory output layout primitives"); + + if (vertices == 0) + vertices = unit.vertices; + else if (vertices != unit.vertices) { + if (language == EShLangGeometry) + error(infoSink, "Contradictory layout max_vertices values"); + else if (language == EShLangTessControl) + error(infoSink, "Contradictory layout vertices values"); + else + assert(0); + } + + if (vertexSpacing == EvsNone) + vertexSpacing = unit.vertexSpacing; + else if (vertexSpacing != unit.vertexSpacing) + error(infoSink, "Contradictory input vertex spacing"); + + if (vertexOrder == EvoNone) + vertexOrder = unit.vertexOrder; + else if (vertexOrder != unit.vertexOrder) + error(infoSink, "Contradictory triangle ordering"); + + if (unit.pointMode) + pointMode = true; + + for (int i = 0; i < 3; ++i) { + if (localSize[i] > 1) + localSize[i] = unit.localSize[i]; + else if (localSize[i] != unit.localSize[i]) + error(infoSink, "Contradictory local size"); + } + + if (unit.xfbMode) + xfbMode = true; + for (size_t b = 0; b < xfbBuffers.size(); ++b) { + if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) + xfbBuffers[b].stride = unit.xfbBuffers[b].stride; + else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride) + error(infoSink, "Contradictory xfb_stride"); 
+ xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride); + if (unit.xfbBuffers[b].containsDouble) + xfbBuffers[b].containsDouble = true; + // TODO: 4.4 link: enhanced layouts: compare ranges + } + + if (unit.treeRoot == 0) + return; + + if (treeRoot == 0) { + treeRoot = unit.treeRoot; + version = unit.version; + requestedExtensions = unit.requestedExtensions; + return; + } + + // Getting this far means we have two existing trees to merge... + + version = std::max(version, unit.version); + requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end()); + + // Get the top-level globals of each unit + TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); + TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence(); + + // Get the linker-object lists + TIntermSequence& linkerObjects = findLinkerObjects(); + TIntermSequence& unitLinkerObjects = unit.findLinkerObjects(); + + mergeBodies(infoSink, globals, unitGlobals); + mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects); + + ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end()); +} + +// +// Merge the function bodies and global-level initializers from unitGlobals into globals. +// Will error check duplication of function bodies for the same signature. +// +void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals) +{ + // TODO: link-time performance: Processing in alphabetical order will be faster + + // Error check the global objects, not including the linker objects + for (unsigned int child = 0; child < globals.size() - 1; ++child) { + for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) { + TIntermAggregate* body = globals[child]->getAsAggregate(); + TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate(); + if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) { + error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:"); + infoSink.info << " " << globals[child]->getAsAggregate()->getName() << "\n"; + } + } + } + + // Merge the global objects, just in front of the linker objects + globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1); +} + +// +// Merge the linker objects from unitLinkerObjects into linkerObjects. +// Duplication is expected and filtered out, but contradictions are an error. +// +void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects) +{ + // Error check and merge the linker objects (duplicates should not be created) + std::size_t initialNumLinkerObjects = linkerObjects.size(); + for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) { + bool merge = true; + for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) { + TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode(); + TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode(); + assert(symbol && unitSymbol); + if (symbol->getName() == unitSymbol->getName()) { + // filter out copy + merge = false; + + // but if one has an initializer and the other does not, update + // the initializer + if (symbol->getConstArray().empty() && ! 
unitSymbol->getConstArray().empty()) + symbol->setConstArray(unitSymbol->getConstArray()); + + // Similarly for binding + if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding()) + symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding; + + // Update implicit array sizes + mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType()); + + // Check for consistent types/qualification/initializers etc. + mergeErrorCheck(infoSink, *symbol, *unitSymbol, false); + } + } + if (merge) + linkerObjects.push_back(unitLinkerObjects[unitLinkObj]); + } +} + +// TODO 4.5 link functionality: cull distance array size checking + +// Recursively merge the implicit array sizes through the objects' respective type trees. +void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType) +{ + if (type.isImplicitlySizedArray() && unitType.isArray()) { + int newImplicitArraySize = unitType.getArraySize(); + if (newImplicitArraySize == 0) + newImplicitArraySize = unitType.getImplicitArraySize(); + if (newImplicitArraySize > type.getImplicitArraySize ()) + type.setImplicitArraySize(newImplicitArraySize); + } + + // Type mismatches are caught and reported after this, just be careful for now. + if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size()) + return; + + for (int i = 0; i < (int)type.getStruct()->size(); ++i) + mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type); +} + +// +// Compare two global objects from two compilation units and see if they match +// well enough. Rules can be different for intra- vs. cross-stage matching. +// +// This function only does one of intra- or cross-stage matching per call. +// +void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage) +{ + bool writeTypeComparison = false; + + // Types have to match + if (symbol.getType() != unitSymbol.getType()) { + error(infoSink, "Types must match:"); + writeTypeComparison = true; + } + + // Qualifiers have to (almost) match + + // Storage... + if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) { + error(infoSink, "Storage qualifiers must match:"); + writeTypeComparison = true; + } + + // Precision... + if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) { + error(infoSink, "Precision qualifiers must match:"); + writeTypeComparison = true; + } + + // Invariance... + if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) { + error(infoSink, "Presence of invariant qualifier must match:"); + writeTypeComparison = true; + } + + // Auxiliary and interpolation... + if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid || + symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth || + symbol.getQualifier().flat != unitSymbol.getQualifier().flat || + symbol.getQualifier().sample != unitSymbol.getQualifier().sample || + symbol.getQualifier().patch != unitSymbol.getQualifier().patch || + symbol.getQualifier().nopersp != unitSymbol.getQualifier().nopersp) { + error(infoSink, "Interpolation and auxiliary storage qualifiers must match:"); + writeTypeComparison = true; + } + + // Memory... 
+ if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent || + symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil || + symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict || + symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly || + symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) { + error(infoSink, "Memory qualifiers must match:"); + writeTypeComparison = true; + } + + // Layouts... + // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec + // requires separate user-supplied offset from actual computed offset, but + // current implementation only has one offset. + if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix || + symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking || + symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation || + symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent || + symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex || + symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding || + (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) { + error(infoSink, "Layout qualification must match:"); + writeTypeComparison = true; + } + + // Initializers have to match, if both are present, and if we don't already know the types don't match + if (! writeTypeComparison) { + if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) { + if (symbol.getConstArray() != unitSymbol.getConstArray()) { + error(infoSink, "Initializers must match:"); + infoSink.info << " " << symbol.getName() << "\n"; + } + } + } + + if (writeTypeComparison) + infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" << + unitSymbol.getType().getCompleteString() << "\"\n"; +} + +// +// Do final link-time error checking of a complete (merged) intermediate representation. +// (Much error checking was done during merging). +// +// Also, lock in defaults of things not set, including array sizes. +// +void TIntermediate::finalCheck(TInfoSink& infoSink) +{ + if (numMains < 1) + error(infoSink, "Missing entry point: Each stage requires one \"void main()\" entry point"); + + // recursion checking + checkCallGraphCycles(infoSink); + + // overlap/alias/missing I/O, etc. + inOutLocationCheck(infoSink); + + if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex")) + error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)"); + + if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData"))) + error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs"); + if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData")) + error(infoSink, "Cannot use both gl_FragColor and gl_FragData"); + + for (size_t b = 0; b < xfbBuffers.size(); ++b) { + if (xfbBuffers[b].containsDouble) + RoundToPow2(xfbBuffers[b].implicitStride, 8); + + // "It is a compile-time or link-time error to have + // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or + // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a + // compile-time or link-time error to have different values specified for the stride for the same buffer." 
+ if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) { + error(infoSink, "xfb_stride is too small to hold all buffer entries:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n"; + } + if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) + xfbBuffers[b].stride = xfbBuffers[b].implicitStride; + + // "If the buffer is capturing any + // outputs with double-precision components, the stride must be a multiple of 8, otherwise it must be a + // multiple of 4, or a compile-time or link-time error results." + if (xfbBuffers[b].containsDouble && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) { + error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) { + error(infoSink, "xfb_stride must be multiple of 4:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } + + // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the + // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents." + if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) { + error(infoSink, "xfb_stride is too large:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n"; + } + } + + switch (language) { + case EShLangVertex: + break; + case EShLangTessControl: + if (vertices == 0) + error(infoSink, "At least one shader must specify an output layout(vertices=...)"); + break; + case EShLangTessEvaluation: + if (inputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an input layout primitive"); + if (vertexSpacing == EvsNone) + vertexSpacing = EvsEqual; + if (vertexOrder == EvoNone) + vertexOrder = EvoCcw; + break; + case EShLangGeometry: + if (inputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an input layout primitive"); + if (outputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an output layout primitive"); + if (vertices == 0) + error(infoSink, "At least one shader must specify a layout(max_vertices = value)"); + break; + case EShLangFragment: + break; + case EShLangCompute: + break; + default: + error(infoSink, "Unknown Stage."); + break; + } + + // Process the tree for any node-specific work. + class TFinalLinkTraverser : public TIntermTraverser { + public: + TFinalLinkTraverser() { } + virtual ~TFinalLinkTraverser() { } + + virtual void visitSymbol(TIntermSymbol* symbol) + { + // Implicitly size arrays. + symbol->getWritableType().adoptImplicitArraySizes(); + } + } finalLinkTraverser; + + treeRoot->traverse(&finalLinkTraverser); +} + +// +// See if the call graph contains any static recursion, which is disallowed +// by the specification. +// +void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink) +{ + // Reset everything, once. 
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + call->visited = false; + call->currentPath = false; + call->errorGiven = false; + } + + // + // Loop, looking for a new connected subgraph. One subgraph is handled per loop iteration. + // + + TCall* newRoot; + do { + // See if we have unvisited parts of the graph. + newRoot = 0; + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (! call->visited) { + newRoot = &(*call); + break; + } + } + + // If not, we are done. + if (! newRoot) + break; + + // Otherwise, we found a new subgraph, process it: + // See what all can be reached by this new root, and if any of + // that is recursive. This is done by depth-first traversals, seeing + // if a new call is found that was already in the currentPath (a back edge), + // thereby detecting recursion. + std::list stack; + newRoot->currentPath = true; // currentPath will be true iff it is on the stack + stack.push_back(newRoot); + while (! stack.empty()) { + // get a caller + TCall* call = stack.back(); + + // Add to the stack just one callee. + // This algorithm always terminates, because only !visited and !currentPath causes a push + // and all pushes change currentPath to true, and all pops change visited to true. + TGraph::iterator child = callGraph.begin(); + for (; child != callGraph.end(); ++child) { + + // If we already visited this node, its whole subgraph has already been processed, so skip it. + if (child->visited) + continue; + + if (call->callee == child->caller) { + if (child->currentPath) { + // Then, we found a back edge + if (! child->errorGiven) { + error(infoSink, "Recursion detected:"); + infoSink.info << " " << call->callee << " calling " << child->callee << "\n"; + child->errorGiven = true; + recursive = true; + } + } else { + child->currentPath = true; + stack.push_back(&(*child)); + break; + } + } + } + if (child == callGraph.end()) { + // no more callees, we bottomed out, never look at this node again + stack.back()->currentPath = false; + stack.back()->visited = true; + stack.pop_back(); + } + } // end while, meaning nothing left to process in this subtree + + } while (newRoot); // redundant loop check; should always exit via the 'break' above +} + +// +// Satisfy rules for location qualifiers on inputs and outputs +// +void TIntermediate::inOutLocationCheck(TInfoSink& infoSink) +{ + // ES 3.0 requires all outputs to have location qualifiers if there is more than one output + bool fragOutHasLocation = false; + bool fragOutWithNoLocation = false; + int numFragOut = 0; + + // TODO: linker functionality: location collision checking + + TIntermSequence& linkObjects = findLinkerObjects(); + for (size_t i = 0; i < linkObjects.size(); ++i) { + const TType& type = linkObjects[i]->getAsTyped()->getType(); + const TQualifier& qualifier = type.getQualifier(); + if (language == EShLangFragment) { + if (qualifier.storage == EvqVaryingOut) { + ++numFragOut; + if (qualifier.hasAnyLocation()) + fragOutHasLocation = true; + else + fragOutWithNoLocation = true; + } + } + } + + if (profile == EEsProfile) { + if (numFragOut > 1 && fragOutWithNoLocation) + error(infoSink, "when more than one fragment shader output, all must have location qualifiers"); + } +} + +TIntermSequence& TIntermediate::findLinkerObjects() const +{ + // Get the top-level globals + TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); + + // Get the last member of the sequences, expected to be the linker-object lists + 
assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects); + + return globals.back()->getAsAggregate()->getSequence(); +} + +// See if a variable was both a user-declared output and used. +// Note: the spec discusses writing to one, but this looks at read or write, which +// is more useful, and perhaps the spec should be changed to reflect that. +bool TIntermediate::userOutputUsed() const +{ + const TIntermSequence& linkerObjects = findLinkerObjects(); + + bool found = false; + for (size_t i = 0; i < linkerObjects.size(); ++i) { + const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode(); + if (symbolNode.getQualifier().storage == EvqVaryingOut && + symbolNode.getName().compare(0, 3, "gl_") != 0 && + inIoAccessed(symbolNode.getName())) { + found = true; + break; + } + } + + return found; +} + +// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions +// as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. +// +// typeCollision is set to true if there is no direct collision, but the types in the same location +// are different. +// +int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision) +{ + typeCollision = false; + + int set; + if (qualifier.isPipeInput()) + set = 0; + else if (qualifier.isPipeOutput()) + set = 1; + else if (qualifier.storage == EvqUniform) + set = 2; + else if (qualifier.storage == EvqBuffer) + set = 3; + else + return -1; + + int size; + if (qualifier.isUniformOrBuffer()) { + if (type.isArray()) + size = type.getArraySize(); + else + size = 1; + } else { + // Strip off the outer array dimension for those having an extra one. + if (type.isArray() && qualifier.isArrayedIo(language)) { + TType elementType(type, 0); + size = computeTypeLocationSize(elementType); + } else + size = computeTypeLocationSize(type); + } + + TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1); + TRange componentRange(0, 3); + if (qualifier.hasComponent()) { + componentRange.start = qualifier.layoutComponent; + componentRange.last = componentRange.start + type.getVectorSize() - 1; + } + TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0); + + // check for collisions, except for vertex inputs on desktop + if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput())) { + for (size_t r = 0; r < usedIo[set].size(); ++r) { + if (range.overlap(usedIo[set][r])) { + // there is a collision; pick one + return std::max(locationRange.start, usedIo[set][r].location.start); + } else if (locationRange.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) { + // aliased-type mismatch + typeCollision = true; + return std::max(locationRange.start, usedIo[set][r].location.start); + } + } + } + + usedIo[set].push_back(range); + + return -1; // no collision +} + +// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions +// as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
+// +int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets) +{ + TRange bindingRange(binding, binding); + TRange offsetRange(offset, offset + numOffsets - 1); + TOffsetRange range(bindingRange, offsetRange); + + // check for collisions, except for vertex inputs on desktop + for (size_t r = 0; r < usedAtomics.size(); ++r) { + if (range.overlap(usedAtomics[r])) { + // there is a collision; pick one + return std::max(offset, usedAtomics[r].offset.start); + } + } + + usedAtomics.push_back(range); + + return -1; // no collision +} + +// Recursively figure out how many locations are used up by an input or output type. +// Return the size of type, as measured by "locations". +int TIntermediate::computeTypeLocationSize(const TType& type) const +{ + // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n + // consecutive locations..." + if (type.isArray()) { + TType elementType(type, 0); + if (type.isImplicitlySizedArray()) { + // TODO: are there valid cases of having an implicitly-sized array with a location? If so, running this code too early. + return computeTypeLocationSize(elementType); + } else + return type.getArraySize() * computeTypeLocationSize(elementType); + } + + // "The locations consumed by block and structure members are determined by applying the rules above + // recursively..." + if (type.isStruct()) { + int size = 0; + for (int member = 0; member < (int)type.getStruct()->size(); ++member) { + TType memberType(type, member); + size += computeTypeLocationSize(memberType); + } + return size; + } + + // ES: "If a shader input is any scalar or vector type, it will consume a single location." + + // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex + // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while + // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will + // consume only a single location, in all stages." + if (type.isScalar()) + return 1; + if (type.isVector()) { + if (language == EShLangVertex && type.getQualifier().isPipeInput()) + return 1; + if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2) + return 2; + else + return 1; + } + + // "If the declared input is an n x m single- or double-precision matrix, ... + // The number of locations assigned for each matrix will be the same as + // for an n-element array of m-component vectors..." + if (type.isMatrix()) { + TType columnType(type, 0); + return type.getMatrixCols() * computeTypeLocationSize(columnType); + } + + assert(0); + return 1; +} + +// Accumulate xfb buffer ranges and check for collisions as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
+// +int TIntermediate::addXfbBufferOffset(const TType& type) +{ + const TQualifier& qualifier = type.getQualifier(); + + assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer()); + TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer]; + + // compute the range + unsigned int size = computeTypeXfbSize(type, buffer.containsDouble); + buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size); + TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1); + + // check for collisions + for (size_t r = 0; r < buffer.ranges.size(); ++r) { + if (range.overlap(buffer.ranges[r])) { + // there is a collision; pick an example to return + return std::max(range.start, buffer.ranges[r].start); + } + } + + buffer.ranges.push_back(range); + + return -1; // no collision +} + +// Recursively figure out how many bytes of xfb buffer are used by the given type. +// Return the size of type, in bytes. +// Sets containsDouble to true if the type contains a double. +// N.B. Caller must set containsDouble to false before calling. +unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& containsDouble) const +{ + // "...if applied to an aggregate containing a double, the offset must also be a multiple of 8, + // and the space taken in the buffer will be a multiple of 8. + // ...within the qualified entity, subsequent components are each + // assigned, in order, to the next available offset aligned to a multiple of + // that component's size. Aggregate types are flattened down to the component + // level to get this sequence of components." + + if (type.isArray()) { + assert(type.isExplicitlySizedArray()); + TType elementType(type, 0); + return type.getArraySize() * computeTypeXfbSize(elementType, containsDouble); + } + + if (type.isStruct()) { + unsigned int size = 0; + bool structContainsDouble = false; + for (int member = 0; member < (int)type.getStruct()->size(); ++member) { + TType memberType(type, member); + // "... if applied to + // an aggregate containing a double, the offset must also be a multiple of 8, + // and the space taken in the buffer will be a multiple of 8." + bool memberContainsDouble = false; + int memberSize = computeTypeXfbSize(memberType, memberContainsDouble); + if (memberContainsDouble) { + structContainsDouble = true; + RoundToPow2(size, 8); + } + size += memberSize; + } + + if (structContainsDouble) { + containsDouble = true; + RoundToPow2(size, 8); + } + return size; + } + + int numComponents; + if (type.isScalar()) + numComponents = 1; + else if (type.isVector()) + numComponents = type.getVectorSize(); + else if (type.isMatrix()) + numComponents = type.getMatrixCols() * type.getMatrixRows(); + else { + assert(0); + numComponents = 1; + } + + if (type.getBasicType() == EbtDouble) { + containsDouble = true; + return 8 * numComponents; + } else + return 4 * numComponents; +} + +const int baseAlignmentVec4Std140 = 16; + +// Return the size and alignment of a scalar. +// The size is returned in the 'size' parameter +// Return value is the alignment of the type. +int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size) +{ + switch (type.getBasicType()) { + case EbtDouble: size = 8; return 8; + default: size = 4; return 4; + } +} + +// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout +// Operates recursively. +// +// If std140 is true, it does the rounding up to vec4 size required by std140, +// otherwise it does not, yielding std430 rules. 
+// +// The size is returned in the 'size' parameter +// Return value is the alignment of the type. +int TIntermediate::getBaseAlignment(const TType& type, int& size, bool std140) +{ + int alignment; + + // When using the std140 storage layout, structures will be laid out in buffer + // storage with its members stored in monotonically increasing order based on their + // location in the declaration. A structure and each structure member have a base + // offset and a base alignment, from which an aligned offset is computed by rounding + // the base offset up to a multiple of the base alignment. The base offset of the first + // member of a structure is taken from the aligned offset of the structure itself. The + // base offset of all other structure members is derived by taking the offset of the + // last basic machine unit consumed by the previous member and adding one. Each + // structure member is stored in memory at its aligned offset. The members of a top- + // level uniform block are laid out in buffer storage by treating the uniform block as + // a structure with a base offset of zero. + // + // 1. If the member is a scalar consuming N basic machine units, the base alignment is N. + // + // 2. If the member is a two- or four-component vector with components consuming N basic + // machine units, the base alignment is 2N or 4N, respectively. + // + // 3. If the member is a three-component vector with components consuming N + // basic machine units, the base alignment is 4N. + // + // 4. If the member is an array of scalars or vectors, the base alignment and array + // stride are set to match the base alignment of a single array element, according + // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The + // array may have padding at the end; the base offset of the member following + // the array is rounded up to the next multiple of the base alignment. + // + // 5. If the member is a column-major matrix with C columns and R rows, the + // matrix is stored identically to an array of C column vectors with R + // components each, according to rule (4). + // + // 6. If the member is an array of S column-major matrices with C columns and + // R rows, the matrix is stored identically to a row of S  C column vectors + // with R components each, according to rule (4). + // + // 7. If the member is a row-major matrix with C columns and R rows, the matrix + // is stored identically to an array of R row vectors with C components each, + // according to rule (4). + // + // 8. If the member is an array of S row-major matrices with C columns and R + // rows, the matrix is stored identically to a row of S  R row vectors with C + // components each, according to rule (4). + // + // 9. If the member is a structure, the base alignment of the structure is N , where + // N is the largest base alignment value of any of its members, and rounded + // up to the base alignment of a vec4. The individual members of this substructure + // are then assigned offsets by applying this set of rules recursively, + // where the base offset of the first member of the sub-structure is equal to the + // aligned offset of the structure. The structure may have padding at the end; + // the base offset of the member following the sub-structure is rounded up to + // the next multiple of the base alignment of the structure. + // + // 10. If the member is an array of S structures, the S elements of the array are laid + // out in order, according to rule (9). 
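(Editorial aside, not part of the patch.) Rules (1)-(4) above are what make std140 arrays "waste" space relative to std430: the element's base alignment, and with it the array stride, is rounded up to the alignment of a vec4. A minimal, self-contained C++ sketch of that effect follows; it mirrors the shape of getBaseAlignment below but is illustrative only, with float components and the helper names as assumptions.

    // Minimal illustration of std140/std430 rules (1)-(4); an editorial sketch, not glslang code.
    #include <algorithm>
    #include <cstdio>

    // Round 'value' up to the next multiple of the power-of-two 'alignment'.
    static int roundUpPow2(int value, int alignment) { return (value + alignment - 1) & ~(alignment - 1); }

    // Base alignment and size of a float-based vector with 'components' components (rules 1-3).
    static int vectorAlignment(int components, int& size)
    {
        const int N = 4;                    // a scalar float consumes 4 basic machine units
        size = components * N;
        if (components == 1) return N;      // rule 1: scalar
        if (components == 2) return 2 * N;  // rule 2: two-component vector
        return 4 * N;                       // rules 2 and 3: three- and four-component vectors
    }

    // Array stride of an array of such vectors (rule 4): the element alignment,
    // rounded up to vec4 alignment (16) only under std140.
    static int arrayStride(int components, bool std140)
    {
        int size;
        int alignment = vectorAlignment(components, size);
        if (std140)
            alignment = std::max(16, alignment);
        return roundUpPow2(size, alignment);
    }

    int main()
    {
        std::printf("float[] stride: std140=%d std430=%d\n", arrayStride(1, true), arrayStride(1, false)); // 16 vs 4
        std::printf("vec2[]  stride: std140=%d std430=%d\n", arrayStride(2, true), arrayStride(2, false)); // 16 vs 8
        std::printf("vec3[]  stride: std140=%d std430=%d\n", arrayStride(3, true), arrayStride(3, false)); // 16 vs 16
        return 0;
    }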
+ + // rules 4, 6, and 8 + if (type.isArray()) { + TType derefType(type, 0); + alignment = getBaseAlignment(derefType, size, std140); + if (std140) + alignment = std::max(baseAlignmentVec4Std140, alignment); + RoundToPow2(size, alignment); + size *= type.getArraySize(); + return alignment; + } + + // rule 9 + if (type.getBasicType() == EbtStruct) { + const TTypeList& memberList = *type.getStruct(); + + size = 0; + int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0; + for (size_t m = 0; m < memberList.size(); ++m) { + int memberSize; + int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, std140); + maxAlignment = std::max(maxAlignment, memberAlignment); + RoundToPow2(size, memberAlignment); + size += memberSize; + } + + return maxAlignment; + } + + // rule 1 + if (type.isScalar()) + return getBaseAlignmentScalar(type, size); + + // rules 2 and 3 + if (type.isVector()) { + int scalarAlign = getBaseAlignmentScalar(type, size); + switch (type.getVectorSize()) { + case 2: + size *= 2; + return 2 * scalarAlign; + default: + size *= type.getVectorSize(); + return 4 * scalarAlign; + } + } + + // rules 5 and 7 + if (type.isMatrix()) { + // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows + TType derefType(type, 0, type.getQualifier().layoutMatrix == ElmRowMajor); + + alignment = getBaseAlignment(derefType, size, std140); + if (std140) + alignment = std::max(baseAlignmentVec4Std140, alignment); + RoundToPow2(size, alignment); + if (type.getQualifier().layoutMatrix == ElmRowMajor) + size *= type.getMatrixRows(); + else + size *= type.getMatrixCols(); + + return alignment; + } + + assert(0); // all cases should be covered above + size = baseAlignmentVec4Std140; + return baseAlignmentVec4Std140; +} + +} // end namespace glslang diff --git a/glslang/MachineIndependent/reflection.h b/glslang/MachineIndependent/reflection.h index 5448f91..47b51cd 100644 --- a/glslang/MachineIndependent/reflection.h +++ b/glslang/MachineIndependent/reflection.h @@ -1,122 +1,122 @@ -// -//Copyright (C) 2013 LunarG, Inc. -// -//All rights reserved. -// -//Redistribution and use in source and binary forms, with or without -//modification, are permitted provided that the following conditions -//are met: -// -// Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// -// Neither the name of 3Dlabs Inc. Ltd. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -//POSSIBILITY OF SUCH DAMAGE. -// - -#ifndef _REFLECTION_INCLUDED -#define _REFLECTION_INCLUDED - -#include "../Public/ShaderLang.h" - -#include -#include - -// -// A reflection database and its interface, consistent with the OpenGL API reflection queries. -// - -namespace glslang { - -class TIntermediate; -class TIntermAggregate; -class TLiveTraverser; - -// Data needed for just a single object at the granularity exchanged by the reflection API -class TObjectReflection { -public: - TObjectReflection(const TString& pName, int pOffset, int pGLDefineType, int pSize, int pIndex) : - name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex) { } - void dump() const { printf("%s: offset %d, type %x, size %d, index %d\n", name.c_str(), offset, glDefineType, size, index); } - TString name; - int offset; - int glDefineType; - int size; // data size in bytes for a block, array size for a (non-block) object that's an array - int index; -}; - -// The full reflection database -class TReflection { -public: - TReflection() : badReflection("__bad__", -1, -1, -1, -1) {} - virtual ~TReflection() {} - - // grow the reflection stage by stage - bool addStage(EShLanguage, const TIntermediate&); - - // for mapping a uniform index to a uniform object's description - int getNumUniforms() { return (int)indexToUniform.size(); } - const TObjectReflection& getUniform(int i) const - { - if (i >= 0 && i < (int)indexToUniform.size()) - return indexToUniform[i]; - else - return badReflection; - } - - // for mapping a block index to the block's description - int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); } - const TObjectReflection& getUniformBlock(int i) const - { - if (i >= 0 && i < (int)indexToUniformBlock.size()) - return indexToUniformBlock[i]; - else - return badReflection; - } - - // for mapping any name to its index (both block names and uniforms names) - int getIndex(const char* name) const - { - TNameToIndex::const_iterator it = nameToIndex.find(name); - if (it == nameToIndex.end()) - return -1; - else - return it->second; - } - - void dump(); - -protected: - friend class glslang::TLiveTraverser; - - typedef std::map TNameToIndex; - typedef std::vector TMapIndexToReflection; - - TObjectReflection badReflection; // return for queries of -1 or generally out of range; has expected descriptions with in it for this - TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed - TMapIndexToReflection indexToUniform; - TMapIndexToReflection indexToUniformBlock; -}; - -} // end namespace glslang - -#endif // _REFLECTION_INCLUDED +// +//Copyright (C) 2013 LunarG, Inc. +// +//All rights reserved. +// +//Redistribution and use in source and binary forms, with or without +//modification, are permitted provided that the following conditions +//are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +//POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _REFLECTION_INCLUDED +#define _REFLECTION_INCLUDED + +#include "../Public/ShaderLang.h" + +#include +#include + +// +// A reflection database and its interface, consistent with the OpenGL API reflection queries. +// + +namespace glslang { + +class TIntermediate; +class TIntermAggregate; +class TLiveTraverser; + +// Data needed for just a single object at the granularity exchanged by the reflection API +class TObjectReflection { +public: + TObjectReflection(const TString& pName, int pOffset, int pGLDefineType, int pSize, int pIndex) : + name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex) { } + void dump() const { printf("%s: offset %d, type %x, size %d, index %d\n", name.c_str(), offset, glDefineType, size, index); } + TString name; + int offset; + int glDefineType; + int size; // data size in bytes for a block, array size for a (non-block) object that's an array + int index; +}; + +// The full reflection database +class TReflection { +public: + TReflection() : badReflection("__bad__", -1, -1, -1, -1) {} + virtual ~TReflection() {} + + // grow the reflection stage by stage + bool addStage(EShLanguage, const TIntermediate&); + + // for mapping a uniform index to a uniform object's description + int getNumUniforms() { return (int)indexToUniform.size(); } + const TObjectReflection& getUniform(int i) const + { + if (i >= 0 && i < (int)indexToUniform.size()) + return indexToUniform[i]; + else + return badReflection; + } + + // for mapping a block index to the block's description + int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); } + const TObjectReflection& getUniformBlock(int i) const + { + if (i >= 0 && i < (int)indexToUniformBlock.size()) + return indexToUniformBlock[i]; + else + return badReflection; + } + + // for mapping any name to its index (both block names and uniforms names) + int getIndex(const char* name) const + { + TNameToIndex::const_iterator it = nameToIndex.find(name); + if (it == nameToIndex.end()) + return -1; + else + return it->second; + } + + void dump(); + +protected: + friend class glslang::TLiveTraverser; + + typedef std::map TNameToIndex; + typedef std::vector TMapIndexToReflection; + + TObjectReflection badReflection; // return for queries of -1 or generally out of range; has expected descriptions 
with in it for this + TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed + TMapIndexToReflection indexToUniform; + TMapIndexToReflection indexToUniformBlock; +}; + +} // end namespace glslang + +#endif // _REFLECTION_INCLUDED diff --git a/glslang/OSDependent/Linux/CMakeLists.txt b/glslang/OSDependent/Linux/CMakeLists.txt index 13b53a1..c2c6ad8 100644 --- a/glslang/OSDependent/Linux/CMakeLists.txt +++ b/glslang/OSDependent/Linux/CMakeLists.txt @@ -6,5 +6,5 @@ include_directories(. ../../../OGLCompilersDLL) add_library(OSDependent STATIC ossource.cpp osinclude.h) -install(TARGETS OSDependent - ARCHIVE DESTINATION lib) +install(TARGETS OSDependent + ARCHIVE DESTINATION lib) diff --git a/glslang/OSDependent/Windows/CMakeLists.txt b/glslang/OSDependent/Windows/CMakeLists.txt index b378be1..2fcb2c6 100644 --- a/glslang/OSDependent/Windows/CMakeLists.txt +++ b/glslang/OSDependent/Windows/CMakeLists.txt @@ -1,14 +1,14 @@ -cmake_minimum_required(VERSION 2.8) - -include_directories(. ../../../OGLCompilersDLL) - -set(SOURCES ossource.cpp osinclude.h) - -add_library(OSDependent STATIC ${SOURCES}) - -if(WIN32) - source_group("Source" FILES ${SOURCES}) -endif(WIN32) - -install(TARGETS OSDependent - ARCHIVE DESTINATION lib) +cmake_minimum_required(VERSION 2.8) + +include_directories(. ../../../OGLCompilersDLL) + +set(SOURCES ossource.cpp osinclude.h) + +add_library(OSDependent STATIC ${SOURCES}) + +if(WIN32) + source_group("Source" FILES ${SOURCES}) +endif(WIN32) + +install(TARGETS OSDependent + ARCHIVE DESTINATION lib) -- 2.7.4
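(Editorial aside, not part of the patch.) For readers coming to reflection.h cold, here is a minimal sketch of how the TReflection interface above might be consumed once a linked stage has been added with addStage(); producing the TIntermediate itself is out of scope here. The include path and the block name "myUniformBlock" are illustrative assumptions.

    // Editorial sketch of querying a populated TReflection database; not glslang code.
    #include <cstdio>
    #include "MachineIndependent/reflection.h"   // path is an assumption; adjust to the build's include setup

    void dumpUniforms(glslang::TReflection& reflection)
    {
        // Walk every uniform the reflection database knows about.
        for (int i = 0; i < reflection.getNumUniforms(); ++i) {
            const glslang::TObjectReflection& obj = reflection.getUniform(i);
            std::printf("uniform %d: %s, offset %d, glType 0x%x, size %d\n",
                        i, obj.name.c_str(), obj.offset, obj.glDefineType, obj.size);
        }

        // Name-based lookup; getIndex returns -1 when the name is unknown.
        int index = reflection.getIndex("myUniformBlock");   // "myUniformBlock" is a hypothetical name
        if (index >= 0)
            std::printf("found myUniformBlock at index %d\n", index);
    }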