return spv::DecorationMax;
}
-
// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate
// associated capabilities when required. For some built-in variables, a capability
// is generated only when using the variable in an executable instruction, but not when
// just declaring it.
// Does it need a swizzle inversion? If so, evaluation is inverted;
// operate first on the swizzle base, then apply the swizzle.
- if (glslangOperands[0]->getAsOperator() &&
+ if (glslangOperands[0]->getAsOperator() &&
glslangOperands[0]->getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
invertedType = convertGlslangToSpvType(glslangOperands[0]->getAsBinaryNode()->getLeft()->getType());
}
// is applied.
spv::Id TGlslangToSpvTraverser::getInvertedSwizzleType(const glslang::TIntermTyped& node)
{
- if (node.getAsOperator() &&
+ if (node.getAsOperator() &&
node.getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
return convertGlslangToSpvType(node.getAsBinaryNode()->getLeft()->getType());
else
        return spv::NoType;
}
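
A minimal sketch of how the inverted-swizzle query above is meant to be consumed; evaluateNode() and applySwizzle() are illustrative placeholders, not part of this change:

// Evaluation order is inverted for a swizzled operand: evaluate the swizzle
// base first, then apply the swizzle to the already-computed result.
spv::Id invertedType = getInvertedSwizzleType(*node);   // spv::NoType when no inversion is needed
if (invertedType != spv::NoType) {
    spv::Id base = evaluateNode(node->getAsBinaryNode()->getLeft());  // hypothetical helper
    result = applySwizzle(base, invertedType);                        // hypothetical helper
}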
-
// Do full recursive conversion of a glslang structure (or block) type to a SPIR-V Id.
// explicitLayout can be kept the same throughout the hierarchical recursive walk.
// Mutually recursive with convertGlslangToSpvType().
}
// copy the projective coordinate if we have to
if (projTargetComp != projSourceComp) {
- spv::Id projComp = builder.createCompositeExtract(params.coords,
+ spv::Id projComp = builder.createCompositeExtract(params.coords,
builder.getScalarTypeId(builder.getTypeId(params.coords)),
projSourceComp);
params.coords = builder.createCompositeInsert(projComp, params.coords,
if (builtIn != spv::BuiltInMax)
addDecoration(id, spv::DecorationBuiltIn, (int)builtIn);
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
if (builtIn == spv::BuiltInSampleMask) {
spv::Decoration decoration;
// GL_NV_sample_mask_override_coverage extension
switch (opCode) {
case spv::OpTypeVector: // fall through
- case spv::OpTypeMatrix: // ...
- case spv::OpTypeSampler: // ...
- case spv::OpTypeArray: // ...
- case spv::OpTypeRuntimeArray: // ...
+ case spv::OpTypeMatrix: // ...
+ case spv::OpTypeSampler: // ...
+ case spv::OpTypeArray: // ...
+ case spv::OpTypeRuntimeArray: // ...
case spv::OpTypePipe: return range_t(2, 3);
case spv::OpTypeStruct: // fall through
case spv::OpTypeFunction: return range_t(2, maxCount);
return literal;
}
-
void spirvbin_t::applyMap()
{
msg(3, 2, std::string("Applying map: "));
);
}
-
// Find free IDs for anything we haven't mapped
void spirvbin_t::mapRemainder()
{
if (idPosR.find(asId(start+1)) == idPosR.end())
stripInst(start);
break;
- default:
+ default:
break; // leave it alone
}
if (spv::InstructionDesc[opCode].hasResult()) {
const spv::Id resultId = asId(word++);
idPosR[resultId] = start;
-
+
if (typeId != spv::NoResult) {
const unsigned idTypeSize = typeSizeInWords(typeId);
error("bad schema, must be 0");
}
-
int spirvbin_t::processInstruction(unsigned word, instfn_t instFn, idfn_t idFn)
{
const auto instructionStart = word;
},
// If local var id used anywhere else, don't eliminate
- [&](spv::Id& id) {
+ [&](spv::Id& id) {
if (fnLocalVars.count(id) > 0) {
fnLocalVars.erase(id);
idMap.erase(id);
}
}
-
#ifdef NOTDEF
bool spirvbin_t::matchType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const
{
}
}
-
// Look for an equivalent type in the globalTypes map
spv::Id spirvbin_t::findType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt) const
{
}
}
-
// Strip a single binary by removing ranges given in stripRange
void spirvbin_t::strip()
{
{
public:
spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose) { }
-
+
// remap on an existing binary in memory
void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
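
For reference, a small usage sketch of this entry point, assuming the SPIR-V words are already loaded into memory:

#include "SPVRemapper.h"
#include <cstdint>
#include <vector>

void remapInPlace(std::vector<std::uint32_t>& spvWords)
{
    spv::spirvbin_t remapper(0 /* verbose */);
    // DO_EVERYTHING turns on all remapping passes (ID canonicalization,
    // DCE, stripping); pass a narrower option mask to run fewer.
    remapper.remap(spvWords, spv::spirvbin_t::DO_EVERYTHING);
}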
range_t constRange(spv::Op opCode) const;
unsigned typeSizeInWords(spv::Id id) const;
unsigned idTypeSizeInWords(spv::Id id) const;
-
+
spv::Id& asId(unsigned word) { return spv[word]; }
const spv::Id& asId(unsigned word) const { return spv[word]; }
spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
void stripDebug(); // strip all debug info
void stripDeadRefs(); // strips debug info for now-dead references after DCE
void strip(); // remove debug symbols
-
+
std::vector<spirword_t> spv; // SPIR words
namemap_t nameMap; // ID names from OpName
// Which functions are called, anywhere in the module, with a call count
std::unordered_map<spv::Id, int> fnCalls;
-
+
posmap_t typeConstPos; // word positions that define types & consts (ordered)
posmap_rev_t idPosR; // reverse map from IDs to positions
typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
-
+
std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs
spv::Id entryPoint; // module entry point
{
Instruction* import = new Instruction(getUniqueId(), NoType, OpExtInstImport);
import->addStringOperand(name);
-
+
imports.push_back(std::unique_ptr<Instruction>(import));
return import->getResultId();
}
type = groupedTypes[OpTypeStruct][t];
if (type->getNumOperands() != 2)
continue;
- if (type->getIdOperand(0) != type0 ||
+ if (type->getIdOperand(0) != type0 ||
type->getIdOperand(1) != type1)
continue;
return type->getResultId();
bool Builder::isConstantOpCode(Op opcode) const
{
switch (opcode) {
- case OpUndef:
+ case OpUndef:
case OpConstantTrue:
case OpConstantFalse:
case OpConstant:
}
}
-
// Step 2: Construct a matrix from that array.
// First make the column vectors, then make the matrix.
//
// Simple in-memory representation (IR) of SPIRV. Just for holding
// Each function's CFG of blocks. Has this hierarchy:
-// - Module, which is a list of
-// - Function, which is a list of
-// - Block, which is a list of
+// - Module, which is a list of
+// - Function, which is a list of
+// - Block, which is a list of
// - Instruction
//
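
A sketch of what that containment implies for a consumer; the accessor names below are illustrative, not necessarily the builder's real API:

void walkModule(const spv::Module& module)
{
    for (const spv::Function* fn : module.getFunctions())            // hypothetical accessor
        for (const spv::Block* block : fn->getBlocks())              // hypothetical accessor
            for (const auto& instruction : block->getInstructions()) // hypothetical accessor
                visit(*instruction);                                 // hypothetical per-instruction hook
}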
const Id NoType = 0;
const Decoration NoPrecision = DecorationMax;
-const MemorySemanticsMask MemorySemanticsAllMemory =
+const MemorySemanticsMask MemorySemanticsAllMemory =
(MemorySemanticsMask)(MemorySemanticsSequentiallyConsistentMask |
MemorySemanticsUniformMemoryMask |
MemorySemanticsSubgroupMemoryMask |
std::vector<std::unique_ptr<Instruction> > localVariables;
Function& parent;
- // track whether this block is known to be uncreachable (not necessarily
+ // track whether this block is known to be unreachable (not necessarily
// true for all unreachable blocks, but should be set at least
// for the extraneous ones introduced by the builder).
bool unreachable;
if (!isdigit(argv[1][0])) {
if (argc < 3) // this form needs one more argument
usage();
-
+
// Parse form: --argname stage base
const EShLanguage lang = FindLanguage(argv[1], false);
base[lang] = atoi(argv[2]);
Work[w] = 0;
argc--;
- argv++;
+ argv++;
for (; argc >= 1; argc--, argv++) {
if (argv[0][0] == '-') {
switch (argv[0][1]) {
lowerword == "sub") {
ProcessBindingBase(argc, argv, baseUboBinding);
} else if (lowerword == "auto-map-bindings" || // synonyms
- lowerword == "auto-map-binding" ||
+ lowerword == "auto-map-binding" ||
lowerword == "amb") {
Options |= EOptionAutoMapBindings;
} else if (lowerword == "flatten-uniform-arrays" || // synonyms
if (Options & EOptionAutoMapBindings)
shader->setAutoMapBindings(true);
-
+
shaders.push_back(shader);
const int defaultVersion = Options & EOptionDefaultDesktop? 110: 100;
if (!program.mapIO())
LinkFailed = true;
}
-
+
// Report
if (! (Options & EOptionSuppressInfolog) &&
! (Options & EOptionMemoryLeakMode)) {
}
//
-// Read a file's data into a string, and compile it using the old interface ShCompile,
+// Read a file's data into a string, and compile it using the old interface ShCompile,
// for non-linkable results.
//
void CompileFile(const char* fileName, ShHandle compiler)
EShMessages messages = EShMsgDefault;
SetMessageOptions(messages);
-
+
for (int i = 0; i < ((Options & EOptionMemoryLeakMode) ? 100 : 1); ++i) {
for (int j = 0; j < ((Options & EOptionMemoryLeakMode) ? 100 : 1); ++j) {
//ret = ShCompile(compiler, shaderStrings, NumShaderStrings, lengths, EShOptNone, &Resources, Options, (Options & EOptionDefaultDesktop) ? 110 : 100, false, messages);
ret = ShCompile(compiler, shaderStrings, NumShaderStrings, nullptr, EShOptNone, &Resources, Options, (Options & EOptionDefaultDesktop) ? 110 : 100, false, messages);
- //const char* multi[12] = { "# ve", "rsion", " 300 e", "s", "\n#err",
- // "or should be l", "ine 1", "string 5\n", "float glo", "bal",
+ //const char* multi[12] = { "# ve", "rsion", " 300 e", "s", "\n#err",
+ // "or should be l", "ine 1", "string 5\n", "float glo", "bal",
// ";\n#error should be line 2\n void main() {", "global = 2.3;}" };
//const char* multi[7] = { "/", "/", "\\", "\n", "\n", "#", "version 300 es" };
//ret = ShCompile(compiler, multi, 7, nullptr, EShOptNone, &Resources, Options, (Options & EOptionDefaultDesktop) ? 110 : 100, false, messages);
//
// Malloc a string of sufficient size and read a string into it.
//
-char** ReadFileData(const char* fileName)
+char** ReadFileData(const char* fileName)
{
FILE *in = nullptr;
int errorCode = fopen_s(&in, fileName, "r");
if (errorCode || in == nullptr)
Error("unable to open input file");
-
+
while (fgetc(in) != EOF)
count++;
break;
}
len = count;
- }
+ }
++i;
}
void add(TWorkItem* item)
{
GetGlobalLock();
-
+
worklist.push_back(item);
-
+
ReleaseGlobalLock();
}
-
+
bool remove(TWorkItem*& item)
{
GetGlobalLock();
-
+
if (worklist.empty())
return false;
item = worklist.front();
worklist.pop_front();
-
+
ReleaseGlobalLock();
return true;
<< " [--map (all|types|names|funcs)]"
<< " [--dce (all|types|funcs)]"
<< " [--opt (all|loadstore)]"
- << " [--strip-all | --strip all | -s]"
- << " [--do-everything]"
+ << " [--strip-all | --strip all | -s]"
+ << " [--do-everything]"
<< " --input | -i file1 [file2...] --output|-o DESTDIR"
<< std::endl;
} // namespace
-
int main(int argc, char** argv)
{
std::vector<std::string> inputFile;
EvqUniform, // read only, shared with app
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
-
+
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
};
// These will show up in error messages
-__inline const char* GetStorageQualifierString(TStorageQualifier q)
+__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
switch (q) {
case EvqTemporary: return "temp"; break;
#if defined(_MSC_VER) && _MSC_VER < 1700
inline long long int strtoll (const char* str, char** endptr, int base)
{
- return _strtoi64(str, endptr, base);
+ return _strtoi64(str, endptr, base);
}
inline unsigned long long int strtoull (const char* str, char** endptr, int base)
{
template <class T> class TList : public std::list<T, pool_allocator<T> > {
};
-template <class K, class D, class CMP = std::less<K> >
+template <class K, class D, class CMP = std::less<K> >
class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
};
inline const TString String(const int i, const int /*base*/ = 10)
{
char text[16]; // 32 bit ints are at most 10 digits in base 10
-
+
// we assume base 10 for all cases
snprintf(text, sizeof(text), "%d", i);
return text;
}
#endif
-
+
struct TSourceLoc {
void init() { name = nullptr; string = 0; line = 0; column = 0; }
// Returns the name if it exists. Otherwise, returns the string number.
TConstUnion() : iConst(0), type(EbtInt) { }
void setIConst(int i)
- {
- iConst = i;
+ {
+ iConst = i;
type = EbtInt;
}
void setUConst(unsigned int u)
{
- uConst = u;
+ uConst = u;
type = EbtUint;
}
void setDConst(double d)
{
- dConst = d;
+ dConst = d;
type = EbtDouble;
}
void setBConst(bool b)
{
- bConst = b;
+ bConst = b;
type = EbtBool;
}
}
bool operator>(const TConstUnion& constant) const
- {
+ {
assert(type == constant.type);
switch (type) {
case EbtInt:
}
bool operator<(const TConstUnion& constant) const
- {
+ {
assert(type == constant.type);
switch (type) {
case EbtInt:
}
TConstUnion operator+(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
}
TConstUnion operator-(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
}
TConstUnion operator*(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
- case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
+ case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
default: assert(false && "Default missing");
}
}
TConstUnion operator%(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
}
TConstUnion operator>>(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
switch (type) {
case EbtInt:
}
TConstUnion operator<<(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
switch (type) {
case EbtInt:
}
TConstUnion operator&(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
}
TConstUnion operator|(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
}
TConstUnion operator^(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
}
TConstUnion operator~() const
- {
+ {
TConstUnion returnValue;
switch (type) {
case EbtInt: returnValue.setIConst(~iConst); break;
}
TConstUnion operator&&(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
}
TConstUnion operator||(const TConstUnion& constant) const
- {
+ {
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
// One convenience is being able to use [] to go inside the array, instead
// of C++ assuming it as an array of pointers to vectors.
//
-// General usage is that the size is known up front, and it is
+// General usage is that the size is known up front, and it is
// created once with the proper size.
//
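
A small usage sketch of that convenience, assuming the array is sized once at construction as described:

TConstUnionArray unionArray(4);           // size known up front, created once
unionArray[0].setDConst(1.0);             // [] indexes the elements directly,
unionArray[1].setDConst(0.5);             // no pointer-to-vector indirection
double d = unionArray[0].getDConst();     // read an element back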
class TConstUnionArray {
TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
- TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size];
+ TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size];
snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
- append(buf);
+ append(buf);
return *this; }
TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
append(s);
append("\n");
}
-
+
void setOutputStream(int output = 4)
{
outputStream = output;
}
protected:
- void append(const char* s);
+ void append(const char* s);
void append(int count, char c);
void append(const TPersistString& t);
void append(const TString& t);
- void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2)
+ void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2)
sink.reserve(sink.capacity() + sink.capacity() / 2); }
void appendToStream(const char* s);
TPersistString sink;
//
// This header defines an allocator that can be used to efficiently
-// allocate a large number of small requests for heap memory, with the
-// intention that they are not individually deallocated, but rather
+// allocate a large number of small requests for heap memory, with the
+// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
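
A sketch of the allocation pattern being described: many small requests, one collective release (push(), pop(), and allocate() are the TPoolAllocator interface declared further below; use() is a hypothetical consumer):

TPoolAllocator pool;
pool.push();                              // open an allocation scope
for (int i = 0; i < 1000; ++i) {
    void* p = pool.allocate(64);          // small request; never freed individually
    use(p);                               // hypothetical consumer
}
pool.pop();                               // everything since push() is released at once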
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
-//
+//
class TAllocation {
public:
memset(postGuard(), guardBlockEndVal, guardBlockSize);
# endif
}
-
+
void check() const {
checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
checkGuardBlock(postGuard(), guardBlockEndVal, "after");
inline static size_t allocationSize(size_t size) {
return size + 2 * guardBlockSize + headerSize();
}
-
+
// Offset from surrounding buffer to get to user data buffer.
inline static unsigned char* offsetAllocation(unsigned char* m) {
return m + guardBlockSize + headerSize();
const static unsigned char userDataFill;
const static size_t guardBlockSize;
-# ifdef GUARD_BLOCKS
+# ifdef GUARD_BLOCKS
inline static size_t headerSize() { return sizeof(TAllocation); }
# else
inline static size_t headerSize() { return 0; }
# endif
};
-
+
//
// There are several stacks. One is to track the pushing and popping
-// of the user, and not yet implemented. The others are simply a
+// of the user, and not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
-// page size. But, having it be about that size or equal to a set of
+// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
class TPoolAllocator {
protected:
friend struct tHeader;
-
+
struct tHeader {
tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
}
size_t pageSize; // granularity of allocation from the OS
- size_t alignment; // all returned allocations will be aligned at
+ size_t alignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t alignmentMask;
size_t headerSkip; // amount of memory to skip to make room for the
TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor
};
-
//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
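
In glslang that simple use looks like the per-thread accessor; a sketch:

GetThreadPoolAllocator().push();   // pool-backed containers (TString, TVector, ...) allocate here
// ... build up pool-allocated objects ...
GetThreadPoolAllocator().pop();    // one global pop releases them all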
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
- template<class Other>
+ template<class Other>
struct rebind {
typedef pool_allocator<Other> other;
};
template<class Other>
pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
- pointer allocate(size_type n) {
+ pointer allocate(size_type n) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
- pointer allocate(size_type n, const void*) {
+ pointer allocate(size_type n, const void*) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
void deallocate(void*, size_type) { }
int maxComputeImageUniforms;
int maxComputeAtomicCounters;
int maxComputeAtomicCounterBuffers;
- int maxVaryingComponents;
+ int maxVaryingComponents;
int maxVertexOutputComponents;
int maxGeometryInputComponents;
int maxGeometryOutputComponents;
// This should not be included by driver code.
//
-
-#define SH_EXPORTING
+#define SH_EXPORTING
#include "../Public/ShaderLang.h"
#include "../MachineIndependent/Versions.h"
#include "InfoSink.h"
TUniformMap() { }
virtual ~TUniformMap() { }
virtual TUniformMap* getAsUniformMap() { return this; }
- virtual int getLocation(const char* name) = 0;
+ virtual int getLocation(const char* name) = 0;
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink infoSink;
};
virtual TCompiler* getAsCompiler() { return this; }
virtual bool linkable() { return haveValidObjectCode; }
-
+
TInfoSink& infoSink;
protected:
TCompiler& operator=(TCompiler&);
class TLinker : public TShHandleBase {
public:
- TLinker(EShExecutable e, TInfoSink& iSink) :
+ TLinker(EShExecutable e, TInfoSink& iSink) :
infoSink(iSink),
- executable(e),
+ executable(e),
haveReturnableObjectCode(false),
appAttributeBindings(0),
fixedAttributeBindings(0),
const ShBindingTable* fixedAttributeBindings;
const int* excludedAttributes;
int excludedCount;
- ShBindingTable* uniformBindings; // created by the linker
+ ShBindingTable* uniformBindings; // created by the linker
};
//
// and the machine dependent code.
//
// The machine dependent code should derive from the classes
-// above. Then Construct*() and Delete*() will create and
+// above. Then Construct*() and Delete*() will create and
// destroy the machine dependent objects, which contain the
// above machine independent information.
//
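
A sketch of that bridge, modeled on the pattern in glslang's CodeGen.cpp (bodies abbreviated):

class TGenericCompiler : public TCompiler {   // machine-dependent derivation
public:
    TGenericCompiler(EShLanguage l, int options)
        : TCompiler(l, infoSink), debugOptions(options) { }
    virtual bool compile(TIntermNode*, int /*version*/ = 0, EProfile = ENoProfile)
        { haveValidObjectCode = true; return true; }
    TInfoSink infoSink;
    int debugOptions;
};

TCompiler* ConstructCompiler(EShLanguage language, int debugOptions)
{
    return new TGenericCompiler(language, debugOptions);  // created by the bridge
}

void DeleteCompiler(TCompiler* compiler) { delete compiler; }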
TShHandleBase* ConstructBindings();
void DeleteLinker(TShHandleBase*);
void DeleteBindingList(TShHandleBase* bindingList);
-
+
TUniformMap* ConstructUniformMap();
void DeleteCompiler(TCompiler*);
clearLayout();
}
- // Drop just the storage qualification, which perhaps should
+ // Drop just the storage qualification, which perhaps should
// never be done, as it is fundamentally inconsistent, but need to
// explore what downstream consumers need.
// E.g., in a dereference, it is an inconsistency between:
TLayoutDepth layoutDepth;
bool blendEquation; // true if any blend equation was specified
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
bool layoutOverrideCoverage; // true if layout override_coverage set
-#endif
+#endif
void init()
{
earlyFragmentTests = false;
layoutDepth = EldNone;
blendEquation = false;
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
layoutOverrideCoverage = false;
#endif
}
layoutDepth = src.layoutDepth;
if (src.blendEquation)
blendEquation = src.blendEquation;
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
if (src.layoutOverrideCoverage)
layoutOverrideCoverage = src.layoutOverrideCoverage;
-#endif
+#endif
}
};
qualifier.storage = q;
}
// for explicit precision qualifier
- TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0,
+ TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0,
bool isVector = false) :
basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1),
arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr)
return false;
}
}
-
+
// Recursively checks if the type contains the given basic type
virtual bool containsBasicType(TBasicType checkType) const
{
p += snprintf(p, end - p, "passthrough ");
#endif
-
p += snprintf(p, end - p, ") ");
}
}
// Definition of the in-memory high-level intermediate representation
// of shaders. This is a tree that the parser creates.
//
-// Nodes in the tree are defined as a hierarchy of classes derived from
+// Nodes in the tree are defined as a hierarchy of classes derived from
// TIntermNode. Each is a node in a tree. There is no preset branching factor;
// each node can have its own type of list of children.
//
EOpNull, // if in a node, should only mean a node is still being built
EOpSequence, // denotes a list of statements, or parameters, etc.
EOpLinkerObjects, // for aggregate node of objects the linker may need, if not reference by the rest of the AST
- EOpFunctionCall,
+ EOpFunctionCall,
EOpFunction, // For function definition
EOpParameters, // an aggregate listing the parameters to a function
//
// Unary operators
//
-
+
EOpNegative,
EOpLogicalNot,
EOpVectorLogicalNot,
//
// moves
//
-
+
EOpAssign,
EOpAddAssign,
EOpSubAssign,
virtual void setType(const TType& t) { type.shallowCopy(t); }
virtual const TType& getType() const { return type; }
virtual TType& getWritableType() { return type; }
-
+
virtual TBasicType getBasicType() const { return type.getBasicType(); }
virtual TQualifier& getQualifier() { return type.getQualifier(); }
virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
//
class TIntermLoop : public TIntermNode {
public:
- TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
+ TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
body(aBody),
test(aTest),
terminal(aTerminal),
};
//
-// For traversing the tree. User should derive from this,
+// For traversing the tree. User should derive from this,
// put their traversal specific data in it, and then pass
// it to a Traverse method.
//
// the subtree). Similarly for inVisit for in-order visiting of nodes with
// multiple children.
//
-// If you only want post-visits, explicitly turn off preVisit (and inVisit)
+// If you only want post-visits, explicitly turn off preVisit (and inVisit)
// and turn on postVisit.
//
-// In general, for the visit*() methods, return true from interior nodes
+// In general, for the visit*() methods, return true from interior nodes
// to have the traversal continue on to children.
//
// If you process children yourself, or don't want them processed, return false.
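
A minimal derived-traverser sketch following that protocol (pre-visit only, counts symbol nodes; illustrative, not part of the patch):

class TSymbolCounter : public glslang::TIntermTraverser {
public:
    TSymbolCounter() : TIntermTraverser(/*preVisit*/ true, /*inVisit*/ false, /*postVisit*/ false),
                       count(0) { }
    virtual void visitSymbol(glslang::TIntermSymbol*) { ++count; }
    virtual bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*) { return true; } // descend
    int count;
};

// usage: TSymbolCounter counter; root->traverse(&counter);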
// For the version, it uses the latest git tag followed by the number of commits.
// For the date, it uses the current date (when then script is run).
-#define GLSLANG_REVISION "Overload400-PrecQual.1744"
-#define GLSLANG_DATE "05-Jan-2017"
+#define GLSLANG_REVISION "Overload400-PrecQual.1747"
+#define GLSLANG_DATE "06-Jan-2017"
-// The file revision.h should be updated to the latest version, somehow, on \r
+// The file revision.h should be updated to the latest version, somehow, on\r
// check-in, if glslang has changed.\r
//\r
// revision.template is the source for revision.h when using SubWCRev as the\r
}
void TInfoSinkBase::append(int count, char c)
-{
+{
if (outputStream & EString) {
checkMem(count);
- sink.append(count, c);
+ sink.append(count, c);
}
//#ifdef _WIN32
}
void TInfoSinkBase::append(const TPersistString& t)
-{
+{
if (outputStream & EString) {
checkMem(t.size());
- sink.append(t);
+ sink.append(t);
}
//#ifdef _WIN32
}
void TInfoSinkBase::append(const TString& t)
-{
+{
if (outputStream & EString) {
checkMem(t.size());
- sink.append(t.c_str());
+ sink.append(t.c_str());
}
//#ifdef _WIN32
//
//
-// Create strings that declare built-in definitions, add built-ins programmatically
+// Create strings that declare built-in definitions, add built-ins programmatically
// that cannot be expressed in the strings, and establish mappings between
// built-in functions and operators.
//
{
}
-
//
// Add all context-independent built-in functions and variables that are present
// for the given version and profile. Share common ones across stages, otherwise
"vec2 radians(vec2 degrees);"
"vec3 radians(vec3 degrees);"
"vec4 radians(vec4 degrees);"
-
+
"float degrees(float radians);"
"vec2 degrees(vec2 radians);"
"vec3 degrees(vec3 radians);"
"vec4 degrees(vec4 radians);"
-
+
"float sin(float angle);"
"vec2 sin(vec2 angle);"
"vec3 sin(vec3 angle);"
"vec4 sin(vec4 angle);"
-
+
"float cos(float angle);"
"vec2 cos(vec2 angle);"
"vec3 cos(vec3 angle);"
"vec4 cos(vec4 angle);"
-
+
"float tan(float angle);"
"vec2 tan(vec2 angle);"
"vec3 tan(vec3 angle);"
"vec4 tan(vec4 angle);"
-
+
"float asin(float x);"
"vec2 asin(vec2 x);"
"vec3 asin(vec3 x);"
"vec4 asin(vec4 x);"
-
+
"float acos(float x);"
"vec2 acos(vec2 x);"
"vec3 acos(vec3 x);"
"vec4 acos(vec4 x);"
-
+
"float atan(float y, float x);"
"vec2 atan(vec2 y, vec2 x);"
"vec3 atan(vec3 y, vec3 x);"
"vec4 atan(vec4 y, vec4 x);"
-
+
"float atan(float y_over_x);"
"vec2 atan(vec2 y_over_x);"
"vec3 atan(vec3 y_over_x);"
"vec4 atan(vec4 y_over_x);"
-
+
"\n");
if (version >= 130) {
"vec2 sinh(vec2 angle);"
"vec3 sinh(vec3 angle);"
"vec4 sinh(vec4 angle);"
-
+
"float cosh(float angle);"
"vec2 cosh(vec2 angle);"
"vec3 cosh(vec3 angle);"
"vec4 cosh(vec4 angle);"
-
+
"float tanh(float angle);"
"vec2 tanh(vec2 angle);"
"vec3 tanh(vec3 angle);"
"vec4 tanh(vec4 angle);"
-
+
"float asinh(float x);"
"vec2 asinh(vec2 x);"
"vec3 asinh(vec3 x);"
"vec4 asinh(vec4 x);"
-
+
"float acosh(float x);"
"vec2 acosh(vec2 x);"
"vec3 acosh(vec3 x);"
"vec4 acosh(vec4 x);"
-
+
"float atanh(float y_over_x);"
"vec2 atanh(vec2 y_over_x);"
"vec3 atanh(vec3 y_over_x);"
"vec4 atanh(vec4 y_over_x);"
-
+
"\n");
}
"vec2 pow(vec2 x, vec2 y);"
"vec3 pow(vec3 x, vec3 y);"
"vec4 pow(vec4 x, vec4 y);"
-
+
"float exp(float x);"
"vec2 exp(vec2 x);"
"vec3 exp(vec3 x);"
"vec4 exp(vec4 x);"
-
+
"float log(float x);"
"vec2 log(vec2 x);"
"vec3 log(vec3 x);"
"vec4 log(vec4 x);"
-
+
"float exp2(float x);"
"vec2 exp2(vec2 x);"
"vec3 exp2(vec3 x);"
"vec4 exp2(vec4 x);"
-
+
"float log2(float x);"
"vec2 log2(vec2 x);"
"vec3 log2(vec3 x);"
"vec4 log2(vec4 x);"
-
+
"float sqrt(float x);"
"vec2 sqrt(vec2 x);"
"vec3 sqrt(vec3 x);"
"vec4 sqrt(vec4 x);"
-
+
"float inversesqrt(float x);"
"vec2 inversesqrt(vec2 x);"
"vec3 inversesqrt(vec3 x);"
"vec4 inversesqrt(vec4 x);"
-
+
"\n");
//
"vec2 abs(vec2 x);"
"vec3 abs(vec3 x);"
"vec4 abs(vec4 x);"
-
+
"float sign(float x);"
"vec2 sign(vec2 x);"
"vec3 sign(vec3 x);"
"vec4 sign(vec4 x);"
-
+
"float floor(float x);"
"vec2 floor(vec2 x);"
"vec3 floor(vec3 x);"
"vec4 floor(vec4 x);"
-
+
"float ceil(float x);"
"vec2 ceil(vec2 x);"
"vec3 ceil(vec3 x);"
"vec4 ceil(vec4 x);"
-
+
"float fract(float x);"
"vec2 fract(vec2 x);"
"vec3 fract(vec3 x);"
"vec4 fract(vec4 x);"
-
+
"float mod(float x, float y);"
"vec2 mod(vec2 x, float y);"
"vec3 mod(vec3 x, float y);"
"vec2 mod(vec2 x, vec2 y);"
"vec3 mod(vec3 x, vec3 y);"
"vec4 mod(vec4 x, vec4 y);"
-
+
"float min(float x, float y);"
"vec2 min(vec2 x, float y);"
"vec3 min(vec3 x, float y);"
"vec2 min(vec2 x, vec2 y);"
"vec3 min(vec3 x, vec3 y);"
"vec4 min(vec4 x, vec4 y);"
-
+
"float max(float x, float y);"
"vec2 max(vec2 x, float y);"
"vec3 max(vec3 x, float y);"
"vec2 max(vec2 x, vec2 y);"
"vec3 max(vec3 x, vec3 y);"
"vec4 max(vec4 x, vec4 y);"
-
+
"float clamp(float x, float minVal, float maxVal);"
"vec2 clamp(vec2 x, float minVal, float maxVal);"
"vec3 clamp(vec3 x, float minVal, float maxVal);"
"vec2 clamp(vec2 x, vec2 minVal, vec2 maxVal);"
"vec3 clamp(vec3 x, vec3 minVal, vec3 maxVal);"
"vec4 clamp(vec4 x, vec4 minVal, vec4 maxVal);"
-
+
"float mix(float x, float y, float a);"
"vec2 mix(vec2 x, vec2 y, float a);"
"vec3 mix(vec3 x, vec3 y, float a);"
"vec2 step(float edge, vec2 x);"
"vec3 step(float edge, vec3 x);"
"vec4 step(float edge, vec4 x);"
-
+
"float smoothstep(float edge0, float edge1, float x);"
"vec2 smoothstep(vec2 edge0, vec2 edge1, vec2 x);"
"vec3 smoothstep(vec3 edge0, vec3 edge1, vec3 x);"
"vec2 smoothstep(float edge0, float edge1, vec2 x);"
"vec3 smoothstep(float edge0, float edge1, vec3 x);"
"vec4 smoothstep(float edge0, float edge1, vec4 x);"
-
+
"\n");
if (version >= 130) {
"vec2 trunc(vec2 x);"
"vec3 trunc(vec3 x);"
"vec4 trunc(vec4 x);"
-
+
"float round(float x);"
"vec2 round(vec2 x);"
"vec3 round(vec3 x);"
"vec4 round(vec4 x);"
-
+
"float roundEven(float x);"
"vec2 roundEven(vec2 x);"
"vec3 roundEven(vec3 x);"
"vec4 roundEven(vec4 x);"
-
+
"float modf(float, out float);"
"vec2 modf(vec2, out vec2 );"
"vec3 modf(vec3, out vec3 );"
"vec4 modf(vec4, out vec4 );"
-
+
" int min(int x, int y);"
"ivec2 min(ivec2 x, int y);"
"ivec3 min(ivec3 x, int y);"
"ivec2 min(ivec2 x, ivec2 y);"
"ivec3 min(ivec3 x, ivec3 y);"
"ivec4 min(ivec4 x, ivec4 y);"
-
+
" uint min(uint x, uint y);"
"uvec2 min(uvec2 x, uint y);"
"uvec3 min(uvec3 x, uint y);"
"uvec2 min(uvec2 x, uvec2 y);"
"uvec3 min(uvec3 x, uvec3 y);"
"uvec4 min(uvec4 x, uvec4 y);"
-
+
" int max(int x, int y);"
"ivec2 max(ivec2 x, int y);"
"ivec3 max(ivec3 x, int y);"
"dvec2 faceforward(dvec2, dvec2, dvec2);"
"dvec3 faceforward(dvec3, dvec3, dvec3);"
"dvec4 faceforward(dvec4, dvec4, dvec4);"
-
+
"double reflect(double, double);"
"dvec2 reflect(dvec2 , dvec2 );"
"dvec3 reflect(dvec3 , dvec3 );"
"dvec4 reflect(dvec4 , dvec4 );"
-
+
"double refract(double, double, double);"
"dvec2 refract(dvec2 , dvec2 , double);"
"dvec3 refract(dvec3 , dvec3 , double);"
"float length(vec2 x);"
"float length(vec3 x);"
"float length(vec4 x);"
-
+
"float distance(float p0, float p1);"
"float distance(vec2 p0, vec2 p1);"
"float distance(vec3 p0, vec3 p1);"
"float distance(vec4 p0, vec4 p1);"
-
+
"float dot(float x, float y);"
"float dot(vec2 x, vec2 y);"
"float dot(vec3 x, vec3 y);"
"float dot(vec4 x, vec4 y);"
-
+
"vec3 cross(vec3 x, vec3 y);"
"float normalize(float x);"
"vec2 normalize(vec2 x);"
"vec3 normalize(vec3 x);"
"vec4 normalize(vec4 x);"
-
+
"float faceforward(float N, float I, float Nref);"
"vec2 faceforward(vec2 N, vec2 I, vec2 Nref);"
"vec3 faceforward(vec3 N, vec3 I, vec3 Nref);"
"vec4 faceforward(vec4 N, vec4 I, vec4 Nref);"
-
+
"float reflect(float I, float N);"
"vec2 reflect(vec2 I, vec2 N);"
"vec3 reflect(vec3 I, vec3 N);"
"vec4 reflect(vec4 I, vec4 N);"
-
+
"float refract(float I, float N, float eta);"
"vec2 refract(vec2 I, vec2 N, float eta);"
"vec3 refract(vec3 I, vec3 N, float eta);"
"vec4 refract(vec4 I, vec4 N, float eta);"
-
+
"\n");
//
"mat2 matrixCompMult(mat2 x, mat2 y);"
"mat3 matrixCompMult(mat3 x, mat3 y);"
"mat4 matrixCompMult(mat4 x, mat4 y);"
-
+
"\n");
// 120 is correct for both ES and desktop
"mat4x2 outerProduct(vec2 c, vec4 r);"
"mat3x4 outerProduct(vec4 c, vec3 r);"
"mat4x3 outerProduct(vec3 c, vec4 r);"
-
+
"mat2 transpose(mat2 m);"
"mat3 transpose(mat3 m);"
"mat4 transpose(mat4 m);"
"mat3x2 matrixCompMult(mat3x2, mat3x2);"
"mat3x4 matrixCompMult(mat3x4, mat3x4);"
"mat4x2 matrixCompMult(mat4x2, mat4x2);"
- "mat4x3 matrixCompMult(mat4x3, mat4x3);"
-
+ "mat4x3 matrixCompMult(mat4x3, mat4x3);"
+
"\n");
// 150 is correct for both ES and desktop
"float determinant(mat2 m);"
"float determinant(mat3 m);"
"float determinant(mat4 m);"
-
+
"mat2 inverse(mat2 m);"
"mat3 inverse(mat3 m);"
"mat4 inverse(mat4 m);"
-
+
"\n");
}
}
"bvec2 lessThan(vec2 x, vec2 y);"
"bvec3 lessThan(vec3 x, vec3 y);"
"bvec4 lessThan(vec4 x, vec4 y);"
-
+
"bvec2 lessThan(ivec2 x, ivec2 y);"
"bvec3 lessThan(ivec3 x, ivec3 y);"
"bvec4 lessThan(ivec4 x, ivec4 y);"
-
+
"bvec2 lessThanEqual(vec2 x, vec2 y);"
"bvec3 lessThanEqual(vec3 x, vec3 y);"
"bvec4 lessThanEqual(vec4 x, vec4 y);"
-
+
"bvec2 lessThanEqual(ivec2 x, ivec2 y);"
"bvec3 lessThanEqual(ivec3 x, ivec3 y);"
"bvec4 lessThanEqual(ivec4 x, ivec4 y);"
-
+
"bvec2 greaterThan(vec2 x, vec2 y);"
"bvec3 greaterThan(vec3 x, vec3 y);"
"bvec4 greaterThan(vec4 x, vec4 y);"
-
+
"bvec2 greaterThan(ivec2 x, ivec2 y);"
"bvec3 greaterThan(ivec3 x, ivec3 y);"
"bvec4 greaterThan(ivec4 x, ivec4 y);"
-
+
"bvec2 greaterThanEqual(vec2 x, vec2 y);"
"bvec3 greaterThanEqual(vec3 x, vec3 y);"
"bvec4 greaterThanEqual(vec4 x, vec4 y);"
-
+
"bvec2 greaterThanEqual(ivec2 x, ivec2 y);"
"bvec3 greaterThanEqual(ivec3 x, ivec3 y);"
"bvec4 greaterThanEqual(ivec4 x, ivec4 y);"
-
+
"bvec2 equal(vec2 x, vec2 y);"
"bvec3 equal(vec3 x, vec3 y);"
"bvec4 equal(vec4 x, vec4 y);"
-
+
"bvec2 equal(ivec2 x, ivec2 y);"
"bvec3 equal(ivec3 x, ivec3 y);"
"bvec4 equal(ivec4 x, ivec4 y);"
-
+
"bvec2 equal(bvec2 x, bvec2 y);"
"bvec3 equal(bvec3 x, bvec3 y);"
"bvec4 equal(bvec4 x, bvec4 y);"
-
+
"bvec2 notEqual(vec2 x, vec2 y);"
"bvec3 notEqual(vec3 x, vec3 y);"
"bvec4 notEqual(vec4 x, vec4 y);"
-
+
"bvec2 notEqual(ivec2 x, ivec2 y);"
"bvec3 notEqual(ivec3 x, ivec3 y);"
"bvec4 notEqual(ivec4 x, ivec4 y);"
-
+
"bvec2 notEqual(bvec2 x, bvec2 y);"
"bvec3 notEqual(bvec3 x, bvec3 y);"
"bvec4 notEqual(bvec4 x, bvec4 y);"
-
+
"bool any(bvec2 x);"
"bool any(bvec3 x);"
"bool any(bvec4 x);"
-
+
"bool all(bvec2 x);"
"bool all(bvec3 x);"
"bool all(bvec4 x);"
-
+
"bvec2 not(bvec2 x);"
"bvec3 not(bvec3 x);"
"bvec4 not(bvec4 x);"
-
+
"\n");
if (version >= 130) {
"bvec2 lessThan(uvec2 x, uvec2 y);"
"bvec3 lessThan(uvec3 x, uvec3 y);"
"bvec4 lessThan(uvec4 x, uvec4 y);"
-
+
"bvec2 lessThanEqual(uvec2 x, uvec2 y);"
"bvec3 lessThanEqual(uvec3 x, uvec3 y);"
"bvec4 lessThanEqual(uvec4 x, uvec4 y);"
-
+
"bvec2 greaterThan(uvec2 x, uvec2 y);"
"bvec3 greaterThan(uvec3 x, uvec3 y);"
"bvec4 greaterThan(uvec4 x, uvec4 y);"
-
+
"bvec2 greaterThanEqual(uvec2 x, uvec2 y);"
"bvec3 greaterThanEqual(uvec3 x, uvec3 y);"
"bvec4 greaterThanEqual(uvec4 x, uvec4 y);"
-
+
"bvec2 equal(uvec2 x, uvec2 y);"
"bvec3 equal(uvec3 x, uvec3 y);"
"bvec4 equal(uvec4 x, uvec4 y);"
"bvec2 notEqual(uvec2 x, uvec2 y);"
"bvec3 notEqual(uvec3 x, uvec3 y);"
- "bvec4 notEqual(uvec4 x, uvec4 y);"
-
+ "bvec4 notEqual(uvec4 x, uvec4 y);"
+
"\n");
}
"vec4 texture3DProj(sampler3D, vec4);" // OES_texture_3D, but caught by keyword check
"vec4 textureCube(samplerCube, vec3);"
-
+
"\n");
}
}
"vec4 texture1DProj(sampler1D, vec2);"
"vec4 texture1DProj(sampler1D, vec4);"
-
+
"vec4 shadow1D(sampler1DShadow, vec3);"
"vec4 shadow2D(sampler2DShadow, vec3);"
"vec4 shadow1DProj(sampler1DShadow, vec4);"
"float noise1(vec2 x);"
"float noise1(vec3 x);"
"float noise1(vec4 x);"
-
+
"vec2 noise2(float x);"
"vec2 noise2(vec2 x);"
"vec2 noise2(vec3 x);"
"vec2 noise2(vec4 x);"
-
+
"vec3 noise3(float x);"
"vec3 noise3(vec2 x);"
"vec3 noise3(vec3 x);"
"vec3 noise3(vec4 x);"
-
+
"vec4 noise4(float x);"
"vec4 noise4(vec2 x);"
"vec4 noise4(vec3 x);"
"vec4 noise4(vec4 x);"
-
+
"\n");
}
"lowp ivec2 findMSB(highp uvec2);"
"lowp ivec3 findMSB(highp uvec3);"
"lowp ivec4 findMSB(highp uvec4);"
-
+
"\n");
}
"vec2 dFdx(vec2 p);"
"vec3 dFdx(vec3 p);"
"vec4 dFdx(vec4 p);"
-
+
"float dFdy(float p);"
"vec2 dFdy(vec2 p);"
"vec3 dFdy(vec3 p);"
"vec2 dFdyCoarse(vec2 p);"
"vec3 dFdyCoarse(vec3 p);"
"vec4 dFdyCoarse(vec4 p);"
-
+
"float fwidthCoarse(float p);"
"vec2 fwidthCoarse(vec2 p);"
"vec3 fwidthCoarse(vec3 p);"
"vec4 fwidthCoarse(vec4 p);"
-
+
"\n");
}
"uniform mat4 gl_ModelViewMatrixInverse;"
"uniform mat4 gl_ProjectionMatrixInverse;"
"uniform mat4 gl_ModelViewProjectionMatrixInverse;"
-
+
"uniform mat4 gl_ModelViewMatrixTranspose;"
"uniform mat4 gl_ProjectionMatrixTranspose;"
"uniform mat4 gl_ModelViewProjectionMatrixTranspose;"
-
+
"uniform mat4 gl_ModelViewMatrixInverseTranspose;"
"uniform mat4 gl_ProjectionMatrixInverseTranspose;"
"uniform mat4 gl_ModelViewProjectionMatrixInverseTranspose;"
"float quadraticAttenuation;"// K2
"};"
-
"struct gl_LightModelParameters {"
"vec4 ambient;" // Acs
"};"
"};"
"uniform gl_FogParameters gl_Fog;"
-
+
"\n");
}
// Define the interface to the vertex shader.
//
//============================================================================
-
+
if (profile != EEsProfile) {
if (version < 130) {
stageBuiltins[EShLangVertex].append(
"in vec4 gl_MultiTexCoord5;"
"in vec4 gl_MultiTexCoord6;"
"in vec4 gl_MultiTexCoord7;"
- "in float gl_FogCoord;"
+ "in float gl_FogCoord;"
"\n");
}
"vec4 gl_Position;" // needs qualifier fixed later
"float gl_PointSize;" // needs qualifier fixed later
"float gl_ClipDistance[];"
- );
+ );
if (IncludeLegacy(version, profile, spvVersion))
stageBuiltins[EShLangVertex].append(
"vec4 gl_ClipVertex;" // needs qualifier fixed later
);
}
-
//============================================================================
//
// Define the interface to the tessellation control shader.
"patch in float gl_TessLevelOuter[4];"
"patch in float gl_TessLevelInner[2];"
-
+
"out gl_PerVertex {"
"vec4 gl_Position;"
"float gl_PointSize;"
"patch in highp float gl_TessLevelOuter[4];"
"patch in highp float gl_TessLevelInner[2];"
-
+
"out gl_PerVertex {"
"highp vec4 gl_Position;"
"highp float gl_PointSize;"
}
//
-// Helper function for initialize(), to add the second set of names for texturing,
+// Helper function for initialize(), to add the second set of names for texturing,
// when adding context-independent built-in functions.
//
void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion)
}
//
-// Helper function for add2ndGenerationSamplingImaging(),
+// Helper function for add2ndGenerationSamplingImaging(),
// when adding context-independent built-in functions.
//
// Add all the query functions for the given type.
}
//
-// Helper function for add2ndGenerationSamplingImaging(),
+// Helper function for add2ndGenerationSamplingImaging(),
// when adding context-independent built-in functions.
//
// Add all the image access functions for the given type.
" imageAtomicOr(volatile coherent ",
" imageAtomicXor(volatile coherent ",
" imageAtomicExchange(volatile coherent "
- };
+ };
for (size_t i = 0; i < numBuiltins; ++i) {
commonBuiltins.append(dataType);
}
//
-// Helper function for add2ndGenerationSamplingImaging(),
+// Helper function for add2ndGenerationSamplingImaging(),
// when adding context-independent built-in functions.
//
// Add all the texture lookup functions for the given type.
}
}
-
//
-// Helper function for add2ndGenerationSamplingImaging(),
+// Helper function for add2ndGenerationSamplingImaging(),
// when adding context-independent built-in functions.
//
// Add all the texture gather functions for the given type.
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents);
s.append(builtInConstant);
-
+
snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents);
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents);
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents);
s.append(builtInConstant);
-
+
snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents);
s.append(builtInConstant);
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents);
s.append(builtInConstant);
-
+
snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents);
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents);
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents);
s.append(builtInConstant);
-
+
snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents);
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel);
}
// images (some in compute below)
- if ((profile == EEsProfile && version >= 310) ||
+ if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 130)) {
snprintf(builtInConstant, maxSize, "const int gl_MaxImageUnits = %d;", resources.maxImageUnits);
s.append(builtInConstant);
}
// atomic counters (some in compute below)
- if ((profile == EEsProfile && version >= 310) ||
+ if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 420)) {
snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounters = %d;", resources. maxVertexAtomicCounters);
s.append(builtInConstant);
s.append("\n");
}
-
// compute
if ((profile == EEsProfile && version >= 310) || (profile != EEsProfile && version >= 420)) {
snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupCount = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupCountX,
resources.maxComputeWorkGroupCountY,
- resources.maxComputeWorkGroupCountZ);
+ resources.maxComputeWorkGroupCountZ);
s.append(builtInConstant);
snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupSize = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupSizeX,
resources.maxComputeWorkGroupSizeY,
// N.B.: a symbol should only be tagged once, and this function is called multiple times, once
// per stage that's used for this profile. So
// - generally, stick common ones in the fragment stage to ensure they are tagged exactly once
- // - for ES, which has different precisions for different stages, the coarsest-grained tagging
+ // - for ES, which has different precisions for different stages, the coarsest-grained tagging
// for a built-in used in many stages needs to be once for the fragment stage and once for
// the vertex stage
symbolTable.setVariableExtensions("gl_NumSamples", 1, &E_GL_OES_sample_variables);
}
}
-
+
BuiltInVariable("gl_Layer", EbvLayer, symbolTable);
BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable);
//
// Add context-dependent (resource-specific) built-ins not handled by the above. These
-// would be ones that need to be programmatically added because they cannot
+// would be ones that need to be programmatically added because they cannot
// be added by simple text strings. For these, also
// 1) Map built-in functions to operators, for those that will turn into an operation node
// instead of remaining a function call.
virtual const TString& getStageString(EShLanguage language) const { return stageBuiltins[language]; }
virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) = 0;
-
+
virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) = 0;
protected:
void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage);
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable);
-
+
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
protected:
int dimMap[EsdNumDims];
};
-
} // end namespace glslang
#endif // _INITIALIZE_INCLUDED_
return folded;
}
- // If either is a specialization constant, while the other is
+ // If either is a specialization constant, while the other is
// a constant (or specialization constant), the result is still
// a specialization constant, if the operation is an allowed
// specialization-constant operation.
node->setType(type);
return node;
}
-
+
//
// Low level: add unary node (no promotions or other argument modifications)
//
if (source == EShSourceHlsl) {
break; // HLSL can promote logical not
}
-
+
if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) {
return nullptr;
}
// samplers can get assigned via a sampler constructor
// (well, not yet, but code in the rest of this function is ready for it)
- if (node->getBasicType() == EbtSampler && op == EOpAssign &&
+ if (node->getBasicType() == EbtSampler && op == EOpAssign &&
node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
break;
if (fromConvertable && toConvertable) {
switch (op) {
case EOpAndAssign: // assignments can perform arbitrary conversions
- case EOpInclusiveOrAssign: // ...
- case EOpExclusiveOrAssign: // ...
- case EOpAssign: // ...
- case EOpAddAssign: // ...
- case EOpSubAssign: // ...
- case EOpMulAssign: // ...
- case EOpVectorTimesScalarAssign: // ...
- case EOpMatrixTimesScalarAssign: // ...
- case EOpDivAssign: // ...
- case EOpModAssign: // ...
+ case EOpInclusiveOrAssign: // ...
+ case EOpExclusiveOrAssign: // ...
+ case EOpAssign: // ...
+ case EOpAddAssign: // ...
+ case EOpSubAssign: // ...
+ case EOpMulAssign: // ...
+ case EOpVectorTimesScalarAssign: // ...
+ case EOpMatrixTimesScalarAssign: // ...
+ case EOpDivAssign: // ...
+ case EOpModAssign: // ...
case EOpReturn: // function returns can also perform arbitrary conversions
case EOpFunctionCall: // conversion of a calling parameter
case EOpLogicalNot:
return node;
}
-
TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
{
// However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators
return false;
}
-
void TIntermBinary::updatePrecision()
{
#ifdef AMD_EXTENSIONS
linkageSymbols.push_back(&symbol);
}
-// Make a shared symbol have a non-shared version that can be edited by the current
+// Make a shared symbol have a non-shared version that can be edited by the current
// compile, such that editing its type will not change the shared version and will
// affect all nodes already sharing it (non-shallow type),
// or adopting its full type after being edited (shallow type).
std::function<bool(const TType& from, const TType& to1, const TType& to2)> better,
/* output */ bool& tie)
{
-//
+//
// Operation
-//
+//
// 1. Prune the input list of candidates down to a list of viable candidates,
// where each viable candidate has
-//
+//
// * at least as many parameters as there are calling arguments, with any
// remaining parameters being optional or having default values
// * each parameter is true under convertible(A, B), where A is the calling
// type for in and B is the formal type, and in addition, for out B is the
// calling type and A is the formal type
-//
+//
// 2. If there are no viable candidates, return with no match.
-//
+//
// 3. If there is only one viable candidate, it is the best match.
//
// 4. If there are multiple viable candidates, select the first viable candidate
// that candidate is better (bullets below), make it the incumbent. Repeat, with
// a linear walk through the viable candidate list. The final incumbent will be
// returned as the best match. A viable candidate is better than the incumbent if
-//
+//
// * it has a function argument with a better(...) conversion than the incumbent,
// for all directions needed by in and out
// * the incumbent has no argument with a better(...) conversion than the
}
return true;
};
-
+
const TFunction* incumbent = viableCandidates.front();
for (auto it = viableCandidates.begin() + 1; it != viableCandidates.end(); ++it) {
const TFunction& candidate = *(*it);
if (incumbent == *it)
continue;
const TFunction& candidate = *(*it);
-
+
// In the case of default parameters, it may have an identical initial set, which is
// also ambiguous
if (betterParam(*incumbent, candidate) || equivalentParams(*incumbent, candidate))
TParseContext::TParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins,
int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
TInfoSink& infoSink, bool forwardCompatible, EShMessages messages) :
- TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
+ TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
contextPragma(true, false), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0), statementNestingLevel(0),
inMain(false), postMainReturn(false), currentFunctionType(nullptr), blockName(nullptr),
limits(resources.limits),
globalInputDefaults.clear();
globalOutputDefaults.clear();
- // "Shaders in the transform
+ // "Shaders in the transform
// feedback capturing mode have an initial global default of
// layout(xfb_buffer = 0) out;"
if (language == EShLangVertex ||
requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str());
if (symbol && symbol->isReadOnly()) {
- // All shared things containing an implicitly sized array must be copied up
+ // All shared things containing an implicitly sized array must be copied up
// on first use, so that all future references will share its array structure,
// so that editing the implicit size will affect all nodes consuming it,
// and so that editing the implicit size won't change the shared one.
//
- // If this is a variable or a block, check it and all it contains, but if this
+ // If this is a variable or a block, check it and all it contains, but if this
// is a member of an anonymous block, check the whole block, as the whole block
// will need to be copied up if it contains an implicitly-sized array.
if (symbol->getType().containsImplicitlySizedArray() ||
error(loc, "", "[", "matrix index out of range '%d'", index);
index = type.getMatrixCols() - 1;
}
- }
+ }
}
// for ES 2.0 (version 100) limitations for almost all index operations except vertex-shader uniforms
}
}
-// Make a shared symbol have a non-shared version that can be edited by the current
+// Make a shared symbol have a non-shared version that can be edited by the current
// compile, such that editing its type will not change the shared version and will
// affect all nodes sharing it.
void TParseContext::makeEditable(TSymbol*& symbol)
// Issue any errors if the non-array object is missing arrayness WRT
// shader I/O that has array requirements.
-// All arrayness checking is handled in array paths, this is for
+// All arrayness checking is handled in array paths; this is for the non-array case.
void TParseContext::ioArrayCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
{
if (! type.isArray() && ! symbolTable.atBuiltInLevel()) {
// If there has been an input primitive declaration (geometry shader) or an output
// number of vertices declaration(tessellation shader), make sure all input array types
-// match it in size. Types come either from nodes in the AST or symbols in the
+// match it in size. Types come either from nodes in the AST or symbols in the
// symbol table.
//
// Types without an array size will be given one.
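
An illustrative geometry-shader case of the resizing just described (GLSL shown in comments; not from the patch):

// layout(triangles) in;   // input primitive declaration: input arrays must have size 3
// in vec4 color[];        // no array size: one will be given (3) to match
// in vec4 normal[3];      // explicit size: checked against the declared primitive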
return result;
else
unaryOpError(loc, str, childNode->getCompleteString());
-
+
return childNode;
}
//
// .length() can't be resolved until we later see the function-calling syntax.
- // Save away the name in the AST for now. Processing is completed in
+ // Save away the name in the AST for now. Processing is completed in
// handleLengthMethod().
//
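
For context, the deferral covers constructs like this (GLSL in comments):

// float a[5];
// int n = a.length();   // ".length" is recorded at the dot-dereference here; it becomes
//                       // an array-length operation only when "()" is seen, in handleLengthMethod()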
if (field == "length") {
if (symbolTable.atBuiltInLevel())
function.setDefined();
else {
- if (prevDec && ! builtIn)
+ if (prevDec && ! builtIn)
symbol->getAsFunction()->setPrototyped(); // need a writable one, but like having prevDec as a const
function.setPrototyped();
}
}
//
-// Handle seeing the function prototype in front of a function definition in the grammar.
+// Handle seeing the function prototype in front of a function definition in the grammar.
// The body is handled after this function returns.
//
TIntermAggregate* TParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function)
if (argQualifier.writeonly && ! formalQualifier.writeonly)
error(arguments->getLoc(), message, "writeonly", "");
}
- // TODO 4.5 functionality: A shader will fail to compile
+ // TODO 4.5 functionality: A shader will fail to compile
// if the value passed to the memargument of an atomic memory function does not correspond to a buffer or
- // shared variable. It is acceptable to pass an element of an array or a single component of a vector to the
- // memargument of an atomic memory function, as long as the underlying array or vector is a buffer or
+ // shared variable. It is acceptable to pass an element of an array or a single component of a vector to the
+ // memargument of an atomic memory function, as long as the underlying array or vector is a buffer or
// shared variable.
}
operationPrecision = std::max(function[0].type->getQualifier().precision,
unaryNode->getOperand()->getType().getQualifier().precision);
if (function.getType().getBasicType() != EbtBool)
- resultPrecision = function.getType().getQualifier().precision == EpqNone ?
+ resultPrecision = function.getType().getQualifier().precision == EpqNone ?
operationPrecision :
function.getType().getQualifier().precision;
} else if (TIntermAggregate* agg = node.getAsAggregate()) {
if (agg->isSampling() || agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore)
resultPrecision = sequence[0]->getAsTyped()->getQualifier().precision;
else if (function.getType().getBasicType() != EbtBool)
- resultPrecision = function.getType().getQualifier().precision == EpqNone ?
+ resultPrecision = function.getType().getQualifier().precision == EpqNone ?
operationPrecision :
function.getType().getQualifier().precision;
}
}
// Finish processing object.length(). This started earlier in handleDotDereference(), where
-// the ".length" part was recognized and semantically checked, and finished here where the
+// the ".length" part was recognized and semantically checked, and finished here where the
// function syntax "()" is recognized.
//
// Return resulting tree node.
return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
} else if (type.isImplicitlySizedArray()) {
if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) {
- // We could be between a layout declaration that gives a built-in io array implicit size and
- // a user redeclaration of that array, meaning we have to substitute its implicit size here
+ // We could be between a layout declaration that gives a built-in io array implicit size and
+ // a user redeclaration of that array, meaning we have to substitute its implicit size here
// without actually redeclaring the array. (It is an error to use a member before the
// redeclaration, but not an error to use the array name itself.)
const TString& name = intermNode->getAsSymbolNode()->getName();
if (imageType.getQualifier().layoutFormat != ElfR32i && imageType.getQualifier().layoutFormat != ElfR32ui)
error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
} else {
- if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
+ if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
else if (imageType.getQualifier().layoutFormat != ElfR32f && profile == EEsProfile)
error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
//
void TParseContext::nonOpBuiltInCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermAggregate& callNode)
{
- // Further maintenance of this function is deprecated, because the "correct"
+ // Further maintenance of this function is deprecated, because the "correct"
// future-oriented design is to not have to do string compares on function names.
// If PureOperatorBuiltins == true, then all built-ins should be mapped
if (imageType.getQualifier().layoutFormat != ElfR32i && imageType.getQualifier().layoutFormat != ElfR32ui)
error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
} else {
- if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
+ if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
else if (imageType.getQualifier().layoutFormat != ElfR32f && profile == EEsProfile)
error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
// of the texture type must match that of the constructed sampler type
// (that is, the suffixes of the type of the first argument and the
// type of the constructor will be spelled the same way)
- if (function[0].type->getBasicType() != EbtSampler ||
- ! function[0].type->getSampler().isTexture() ||
+ if (function[0].type->getBasicType() != EbtSampler ||
+ ! function[0].type->getSampler().isTexture() ||
function[0].type->isArray()) {
error(loc, "sampler-constructor first argument must be a scalar textureXXX type", token, "");
return true;
#endif
// Ordering
- if (! force && ((profile != EEsProfile && version < 420) ||
+ if (! force && ((profile != EEsProfile && version < 420) ||
(profile == EEsProfile && version < 310))
&& ! extensionTurnedOn(E_GL_ARB_shading_language_420pack)) {
// non-function parameters
// Now, modify the type of the copy, as per the type of the current redeclaration.
TQualifier& symbolQualifier = symbol->getWritableType().getQualifier();
- if (ssoPre150) {
+ if (ssoPre150) {
if (intermediate.inIoAccessed(identifier))
error(loc, "cannot redeclare after use", identifier.c_str(), "");
if (qualifier.hasLayout())
error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
if (qualifier.storage != EvqVaryingIn)
error(loc, "cannot change input storage qualification of", "redeclaration", symbol->getName().c_str());
- if (! builtIn && (publicType.pixelCenterInteger != intermediate.getPixelCenterInteger() ||
+ if (! builtIn && (publicType.pixelCenterInteger != intermediate.getPixelCenterInteger() ||
publicType.originUpperLeft != intermediate.getOriginUpperLeft()))
error(loc, "cannot redeclare with different qualification:", "redeclaration", symbol->getName().c_str());
if (publicType.pixelCenterInteger)
error(loc, "all redeclarations must use the same depth layout on", "redeclaration", symbol->getName().c_str());
}
}
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
else if (identifier == "gl_SampleMask") {
if (!publicType.layoutOverrideCoverage) {
error(loc, "redeclaration only allowed for override_coverage layout", "redeclaration", symbol->getName().c_str());
// go to next member
++member;
- } else {
+ } else {
// For missing members of anonymous blocks that have been redeclared,
// hide the original (shared) declaration.
// Instance-named blocks can just have the member removed.
// fix and check for member storage qualifiers and types that don't belong within a structure
for (unsigned int member = 0; member < typeList.size(); ++member) {
- TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
const TSourceLoc& memberLoc = typeList[member].loc;
if (memberQualifier.isAuxiliary() ||
memberQualifier.isInterpolation() ||
// Check for stages that are enabled by extension.
// Can't do this at the beginning, it is chicken and egg to add a stage by
// extension.
- // Stage-specific features were correctly tested for already, this is just
+ // Stage-specific features were correctly tested for already, this is just
// about the stage itself.
switch (language) {
case EShLangGeometry:
error(loc, "unknown blend equation", "blend_support", "");
return;
}
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
if (id == "override_coverage") {
requireExtensions(loc, 1, &E_GL_NV_sample_mask_override_coverage, "sample mask override coverage");
publicType.shaderQualifiers.layoutOverrideCoverage = true;
}
std::transform(id.begin(), id.end(), id.begin(), ::tolower);
-
+
if (id == "offset") {
// "offset" can be for either
// - uniform offsets
publicType.qualifier.layoutComponent = value;
return;
} else if (id.compare(0, 4, "xfb_") == 0) {
- // "Any shader making any static use (after preprocessing) of any of these
- // *xfb_* qualifiers will cause the shader to be in a transform feedback
- // capturing mode and hence responsible for describing the transform feedback
+ // "Any shader making any static use (after preprocessing) of any of these
+ // *xfb_* qualifiers will cause the shader to be in a transform feedback
+ // capturing mode and hence responsible for describing the transform feedback
// setup."
intermediate.setXfbMode();
const char* feature = "transform feedback qualifier";
// "It is a compile-time error to specify an *xfb_buffer* that is greater than
// the implementation-dependent constant gl_MaxTransformFeedbackBuffers."
if (value >= resources.maxTransformFeedbackBuffers)
- error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d", resources.maxTransformFeedbackBuffers);
+ error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d", resources.maxTransformFeedbackBuffers);
if (value >= (int)TQualifier::layoutXfbBufferEnd)
error(loc, "buffer is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbBufferEnd-1);
else
publicType.qualifier.layoutXfbOffset = value;
return;
} else if (id == "xfb_stride") {
- // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
+ // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
if (value > 4 * resources.maxTransformFeedbackInterleavedComponents)
error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d", resources.maxTransformFeedbackInterleavedComponents);
// Merge any layout qualifier information from src into dst, leaving everything else in dst alone
//
// "More than one layout qualifier may appear in a single declaration.
-// Additionally, the same layout-qualifier-name can occur multiple times
-// within a layout qualifier or across multiple layout qualifiers in the
-// same declaration. When the same layout-qualifier-name occurs
-// multiple times, in a single declaration, the last occurrence overrides
-// the former occurrence(s). Further, if such a layout-qualifier-name
-// will effect subsequent declarations or other observable behavior, it
-// is only the last occurrence that will have any effect, behaving as if
-// the earlier occurrence(s) within the declaration are not present.
-// This is also true for overriding layout-qualifier-names, where one
-// overrides the other (e.g., row_major vs. column_major); only the last
-// occurrence has any effect."
+// Additionally, the same layout-qualifier-name can occur multiple times
+// within a layout qualifier or across multiple layout qualifiers in the
+// same declaration. When the same layout-qualifier-name occurs
+// multiple times, in a single declaration, the last occurrence overrides
+// the former occurrence(s). Further, if such a layout-qualifier-name
+// will effect subsequent declarations or other observable behavior, it
+// is only the last occurrence that will have any effect, behaving as if
+// the earlier occurrence(s) within the declaration are not present.
+// This is also true for overriding layout-qualifier-names, where one
+// overrides the other (e.g., row_major vs. column_major); only the last
+// occurrence has any effect."
//
void TParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly)
{
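
Aside: the quoted last-occurrence-wins rule means the merge can be a plain overwrite of whatever the later qualifier explicitly sets. A toy sketch (invented types; glslang's TQualifier tracks many more fields and uses sentinel values rather than std::optional):

```cpp
#include <optional>

// A toy qualifier with just two layout values, for illustration only.
struct ToyQualifier {
    std::optional<int> location;
    std::optional<int> binding;
};

// "Only the last occurrence has any effect": merging a later qualifier
// over an earlier one overwrites whatever the later one sets.
void mergeToyLayoutQualifiers(ToyQualifier& dst, const ToyQualifier& src)
{
    if (src.location) dst.location = src.location;
    if (src.binding)  dst.binding  = src.binding;
}
```

So, for example, `layout(location = 1) layout(location = 2) out vec4 c;` ends up with location 2.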
}
}
- // Check packing and matrix
+ // Check packing and matrix
if (qualifier.hasUniformLayout()) {
switch (qualifier.storage) {
case EvqUniform:
if (repeated >= 0)
error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer);
- // "The offset must be a multiple of the size of the first component of the first
- // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate
+ // "The offset must be a multiple of the size of the first component of the first
+ // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate
// containing a double, the offset must also be a multiple of 8..."
if (type.containsBasicType(EbtDouble) && ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 8))
error(loc, "type contains double; xfb_offset must be a multiple of 8", "xfb_offset", "");
error(loc, "does not apply to unsigned integer images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
if (profile == EEsProfile) {
- // "Except for image variables qualified with the format qualifiers r32f, r32i, and r32ui, image variables must
+ // "Except for image variables qualified with the format qualifiers r32f, r32i, and r32ui, image variables must
// specify either memory qualifier readonly or the memory qualifier writeonly."
if (! (qualifier.layoutFormat == ElfR32f || qualifier.layoutFormat == ElfR32i || qualifier.layoutFormat == ElfR32ui)) {
if (! qualifier.readonly && ! qualifier.writeonly)
if (qualifier.hasAnyLocation()) {
- // "As with input layout qualifiers, all shaders except compute shaders
- // allow *location* layout qualifiers on output variable declarations,
+ // "As with input layout qualifiers, all shaders except compute shaders
+ // allow *location* layout qualifiers on output variable declarations,
// output block declarations, and output block member declarations."
switch (qualifier.storage) {
// create list of candidates to send
TVector<const TFunction*> candidateList;
symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
-
+
// can 'from' convert to 'to'?
const auto convertible = [this](const TType& from, const TType& to, TOperator, int) -> bool {
if (from == to)
// for ambiguity reporting
bool tie = false;
-
+
// send to the generic selector
const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
return bestMatch;
}
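
Aside: the shape of the generic selector used here, sketched with invented names; glslang's selectFunction additionally filters candidates through the 'convertible' predicate before comparing them:

```cpp
#include <vector>

// Walk the candidates, keep the one the 'better' comparator prefers, and
// remember ties for ambiguity reporting.
template <typename Candidate, typename BetterFn>
const Candidate* selectBest(const std::vector<const Candidate*>& list,
                            BetterFn better, bool& tie)
{
    tie = false;
    const Candidate* best = nullptr;
    for (const Candidate* c : list) {
        if (best == nullptr || better(*c, *best)) {
            best = c;           // strictly better: new winner
            tie = false;
        } else if (! better(*best, *c))
            tie = true;         // neither is better: ambiguous so far
    }
    return best;
}
```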
-// When a declaration includes a type, but not a variable name, it can be
+// When a declaration includes a type, but not a variable name, it can be used
// to establish defaults.
void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType& publicType)
{
// Declare the variable
if (arraySizes || type.isArray()) {
- // Arrayness is potentially coming both from the type and from the
+ // Arrayness is potentially coming both from the type and from the
// variable: "int[] a[];" or just one or the other.
// Merge it all to the type, so all arrayness is part of the type.
arrayDimCheck(loc, &type, arraySizes);
error(memberLoc, "member cannot contradict block", "stream", "");
}
- // "This includes a block's inheritance of the
- // current global default buffer, a block member's inheritance of the block's
- // buffer, and the requirement that any *xfb_buffer* declared on a block
+ // "This includes a block's inheritance of the
+ // current global default buffer, a block member's inheritance of the block's
+ // buffer, and the requirement that any *xfb_buffer* declared on a block
// member must match the buffer inherited from the block."
if (memberQualifier.hasXfbBuffer()) {
if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer)
}
//
-// "For a block, this process applies to the entire block, or until the first member
-// is reached that has a location layout qualifier. When a block member is declared with a location
+// "For a block, this process applies to the entire block, or until the first member
+// is reached that has a location layout qualifier. When a block member is declared with a location
// qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level
-// declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
-// until the next member declared with a location qualifier. The values used for locations do not have to be
+// declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
+// until the next member declared with a location qualifier. The values used for locations do not have to be
// declared in increasing order."
void TParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation)
{
- // "If a block has no block-level location layout qualifier, it is required that either all or none of its members
+ // "If a block has no block-level location layout qualifier, it is required that either all or none of its members
// have a location layout qualifier, or a compile-time error results."
if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation)
error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", "");
void TParseContext::fixBlockXfbOffsets(TQualifier& qualifier, TTypeList& typeList)
{
- // "If a block is qualified with xfb_offset, all its
- // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
- // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
+ // "If a block is qualified with xfb_offset, all its
+ // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
+ // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
// offsets."
if (! qualifier.hasXfbBuffer() || ! qualifier.hasXfbOffset())
qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd;
}
-// Calculate and save the offset of each block member, using the recursively
+// Calculate and save the offset of each block member, using the recursively
// defined block offset rules and the user-provided offset and align.
//
-// Also, compute and save the total size of the block. For the block's size, arrayness
+// Also, compute and save the total size of the block. For the block's size, arrayness
// is not taken into account, as each element is backed by a separate buffer.
//
void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typeList)
int memberAlignment = intermediate.getBaseAlignment(*typeList[member].type, memberSize, dummyStride, qualifier.layoutPacking == ElpStd140,
subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor : qualifier.layoutMatrix == ElmRowMajor);
if (memberQualifier.hasOffset()) {
- // "The specified offset must be a multiple
+ // "The specified offset must be a multiple
// of the base alignment of the type of the block member it qualifies, or a compile-time error results."
if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment))
error(memberLoc, "must be a multiple of the member's alignment", "offset", "");
- // GLSL: "It is a compile-time error to specify an offset that is smaller than the offset of the previous
+ // GLSL: "It is a compile-time error to specify an offset that is smaller than the offset of the previous
// member in the block or that lies within the previous member of the block"
if (spvVersion.spv == 0) {
if (memberQualifier.layoutOffset < offset)
error(memberLoc, "cannot lie in previous members", "offset", "");
- // "The offset qualifier forces the qualified member to start at or after the specified
- // integral-constant expression, which will be its byte offset from the beginning of the buffer.
- // "The actual offset of a member is computed as
+ // "The offset qualifier forces the qualified member to start at or after the specified
+ // integral-constant expression, which will be its byte offset from the beginning of the buffer.
+ // "The actual offset of a member is computed as
// follows: If offset was declared, start with that offset, otherwise start with the next available offset."
offset = std::max(offset, memberQualifier.layoutOffset);
} else {
}
}
- // "The actual alignment of a member will be the greater of the specified align alignment and the standard
+ // "The actual alignment of a member will be the greater of the specified align alignment and the standard
// (e.g., std140) base alignment for the member's type."
if (memberQualifier.hasAlign())
memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign);
// "If the resulting offset is not a multiple of the actual alignment,
- // increase it to the first offset that is a multiple of
+ // increase it to the first offset that is a multiple of
// the actual alignment."
RoundToPow2(offset, memberAlignment);
typeList[member].type->getQualifier().layoutOffset = offset;
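
Aside: the member-placement rules quoted above condense to a few lines. A standalone sketch, with invented names and the sentinel conventions noted in the comments:

```cpp
#include <algorithm>

// glslang's RoundToPow2, restated: round 'value' up to the next multiple
// of the power-of-two 'alignment'.
inline void roundToPow2(int& value, int alignment)
{
    value = (value + alignment - 1) & ~(alignment - 1);
}

// One member's placement: a declared offset only moves the cursor forward
// ("start at or after"), a declared align only ever raises the standard
// base alignment, and the cursor then advances by the member's size.
// Sentinels: declaredOffset < 0 and declaredAlign == 0 mean "absent".
int placeBlockMember(int cursor, int baseAlignment, int memberSize,
                     int declaredOffset, int declaredAlign,
                     int& assignedOffset)
{
    if (declaredOffset >= 0)
        cursor = std::max(cursor, declaredOffset);
    int alignment = std::max(baseAlignment, declaredAlign);
    roundToPow2(cursor, alignment);
    assignedOffset = cursor;
    return cursor + memberSize;   // next available offset
}
```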
error(loc, "can only apply to 'out'", id, "");
if (! intermediate.setVertices(publicType.shaderQualifiers.vertices))
error(loc, "cannot change previously set layout value", id, "");
-
+
if (language == EShLangTessControl)
checkIoArraysConsistency(loc);
}
// * note, that appropriately gives an error if redeclaring a block that
// was already used and hence already copied-up
//
- // - on seeing a layout declaration that sizes the array, fix everything in the
+ // - on seeing a layout declaration that sizes the array, fix everything in the
// resize-list, giving errors for mismatch
//
// - on seeing an array size declaration, give errors on mismatch between it and previous
void InitializeMemoryPools()
{
- TThreadMemoryPools* pools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
+ TThreadMemoryPools* pools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
if (pools)
return;
TPoolAllocator *threadPoolAllocator = new TPoolAllocator();
TThreadMemoryPools* threadData = new TThreadMemoryPools();
-
+
threadData->threadPoolAllocator = threadPoolAllocator;
OS_SetTLSValue(PoolIndex, threadData);
void FreeGlobalPools()
{
// Release the allocated memory for this thread.
- TThreadMemoryPools* globalPools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
+ TThreadMemoryPools* globalPools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
if (! globalPools)
return;
GetThreadPoolAllocator().popAll();
- delete &GetThreadPoolAllocator();
+ delete &GetThreadPoolAllocator();
delete globalPools;
}
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
-TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
+TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
pageSize(growthIncrement),
alignment(allocationAlignment),
freeList(0),
#endif
}
-
void TPoolAllocator::push()
{
tAllocState state = { currentPageOffset, inUseList };
stack.push_back(state);
-
+
//
// Indicate there is no current page to allocate from.
//
while (inUseList != page) {
// invoke destructor to free allocation list
inUseList->~tHeader();
-
+
tHeader* nextInUse = inUseList->nextPage;
if (inUseList->pageCount > 1)
delete [] reinterpret_cast<char*>(inUseList);
// size including guard blocks. In release build,
// guardBlockSize=0 and this all gets optimized away.
size_t allocationSize = TAllocation::allocationSize(numBytes);
-
+
//
// Just keep some interesting statistics.
//
// Use placement-new to initialize header
new(memory) tHeader(inUseList, 1);
inUseList = memory;
-
+
unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
return initializeAllocation(inUseList, ret, numBytes);
}
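
Aside: a hypothetical caller of the allocator implemented above, assuming only the push()/pop()/allocate() interface this patch shows:

```cpp
// Typical nested-lifetime use: mark with push(), bump-allocate freely,
// then release everything since the mark at once with pop().
void poolUsageSketch(TPoolAllocator& pool)
{
    pool.push();
    void* a = pool.allocate(64);     // cheap pointer-bump allocations
    void* b = pool.allocate(4096);   // large requests get their own pages
    (void)a; (void)b;                // ... build ASTs, symbol tables, ...
    pool.pop();                      // frees a and b (and anything else)
}
```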
-
//
// Check all allocations in a list for damage by calling check on each.
//
{
do {
consumeWhiteSpace(foundNonSpaceTab);
-
+
// if not starting a comment now, then done
int c = peek();
if (c != '/' || c == EndOfInput)
return;
- // skip potential comment
+ // skip potential comment
foundNonSpaceTab = true;
if (! consumeComment())
return;
}
lookingInMiddle = true;
- // Nominal start, skipping the desktop allowed comments and white space, but tracking if
+ // Nominal start: skip the comments and white space allowed on desktop, but track whether
// something else was found for ES:
consumeWhitespaceComment(foundNonSpaceTab);
- if (foundNonSpaceTab)
+ if (foundNonSpaceTab)
versionNotFirst = true;
// "#"
return keyword;
case BUFFER:
- if ((parseContext.profile == EEsProfile && parseContext.version < 310) ||
+ if ((parseContext.profile == EEsProfile && parseContext.version < 310) ||
(parseContext.profile != EEsProfile && parseContext.version < 430))
return identifierOrType();
return keyword;
case U64VEC3:
case U64VEC4:
afterType = true;
- if (parseContext.symbolTable.atBuiltInLevel() ||
+ if (parseContext.symbolTable.atBuiltInLevel() ||
(parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_int64) &&
parseContext.profile != EEsProfile && parseContext.version >= 450))
return keyword;
return keyword;
case PRECISE:
- if ((parseContext.profile == EEsProfile && parseContext.extensionsTurnedOn(Num_AEP_gpu_shader5, AEP_gpu_shader5)) ||
+ if ((parseContext.profile == EEsProfile && parseContext.extensionsTurnedOn(Num_AEP_gpu_shader5, AEP_gpu_shader5)) ||
(parseContext.profile != EEsProfile && parseContext.version >= 400))
return keyword;
if (parseContext.profile == EEsProfile && parseContext.version == 310) {
int TScanContext::firstGenerationImage(bool inEs310)
{
- if (parseContext.symbolTable.atBuiltInLevel() ||
+ if (parseContext.symbolTable.atBuiltInLevel() ||
(parseContext.profile != EEsProfile && (parseContext.version >= 420 || parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))) ||
(inEs310 && parseContext.profile == EEsProfile && parseContext.version >= 310))
return keyword;
return keyword;
}
- if (parseContext.symbolTable.atBuiltInLevel() ||
- (parseContext.profile != EEsProfile &&
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile &&
(parseContext.version >= 420 || parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))))
return keyword;
size_t currentChar;
// This is for reporting what string/line an error occurred on, and can be overridden by #line.
- // It remembers the last state of each source string as it is left for the next one, so unget()
+ // It remembers the last state of each source string as it is left for the next one, so unget()
// can restore that state.
TSourceLoc* loc; // an array
EPcCount
};
-// A process-global symbol table per version per profile for built-ins common
-// to multiple stages (languages), and a process-global symbol table per version
+// A process-global symbol table per version per profile for built-ins common
+// to multiple stages (languages), and a process-global symbol table per version
// per profile per stage for built-ins unique to each stage. They will be sparsely
// populated, so they will only be generated as needed.
-//
+//
// Each has a different set of built-ins, and we want to preserve that from
// compile to compile.
//
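
Aside: the cache described here is, in effect, a five-dimensional array of lazily built, read-only symbol tables. A sketch of its shape, with invented dimension constants (glslang maps version, SPIR-V target, profile, and source to small dense indices first):

```cpp
class TSymbolTable;   // the built-in tables, defined elsewhere in glslang

constexpr int VersionCount    = 16;
constexpr int SpvVersionCount = 4;
constexpr int ProfileCount    = 4;
constexpr int SourceCount     = 2;
constexpr int StageCount      = 6;   // EShLangCount in glslang

// Sparsely populated: a slot stays null until some compile first needs
// that (version, spv, profile, source, stage) combination; it is then
// built once, marked read-only, and shared across later compiles.
TSymbolTable* SharedSymbolTablesSketch
    [VersionCount][SpvVersionCount][ProfileCount][SourceCount][StageCount] = {};
```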
EShSource source, TInfoSink& infoSink, TSymbolTable& symbolTable)
{
TIntermediate intermediate(language, version, profile);
-
+
intermediate.setSource(source);
- std::unique_ptr<TParseContextBase> parseContext(CreateParseContext(symbolTable, intermediate, version, profile, source,
+ std::unique_ptr<TParseContextBase> parseContext(CreateParseContext(symbolTable, intermediate, version, profile, source,
language, infoSink, spvVersion, true, EShMsgDefault,
true));
TScanContext scanContext(*parseContext);
parseContext->setScanContext(&scanContext);
parseContext->setPpContext(&ppContext);
-
+
//
// Push the symbol table to give it an initial scope. This
// push should not have a corresponding pop, so that built-ins
if (builtInLengths[0] == 0)
return true;
-
+
TInputScanner input(1, builtInShaders, builtInLengths);
if (! parseContext->parseShaderStrings(ppContext, input)) {
infoSink.info.message(EPrefixInternalError, "Unable to parse built-ins");
EProfile profile, const SpvVersion& spvVersion, EShLanguage language, EShSource source)
{
std::unique_ptr<TBuiltInParseables> builtInParseables(CreateBuiltInParseables(infoSink, source));
-
+
builtInParseables->initialize(*resources, version, profile, spvVersion, language);
InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, language, source, infoSink, symbolTable);
builtInParseables->identifyBuiltIns(version, profile, spvVersion, language, symbolTable, *resources);
}
//
-// To do this on the fly, we want to leave the current state of our thread's
+// To do this on the fly, we want to leave the current state of our thread's
// pool allocator intact, so:
// - Switch to a new pool for parsing the built-ins
// - Do the parsing, which builds the symbol table, using the new pool
[versionIndex][spvVersionIndex][profileIndex][sourceIndex][CommonIndex(profile, (EShLanguage)stage)]);
SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->copyTable(*stageTables[stage]);
SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->readOnly();
- }
+ }
}
// Clean up the local tables before deleting the pool they used.
profile = ECoreProfile;
else
profile = ENoProfile;
- }
+ }
// else: typical desktop case... e.g., "#version 410 core"
}
}
if (numStrings == 0)
return true;
-
+
// Move to length-based strings, rather than null-terminated strings.
// Also, add strings to include the preamble and to ensure the shader is not null,
// which lets the grammar accept what was a null (post preprocessing) shader.
if (spvVersion.vulkan >= 100)
intermediate.setOriginUpperLeft();
SetupBuiltinSymbolTable(version, profile, spvVersion, source);
-
+
TSymbolTable* cachedTable = SharedSymbolTables[MapVersionToIndex(version)]
[MapSpvVersionToIndex(spvVersion)]
[MapProfileToIndex(profile)]
[MapSourceToIndex(source)]
[compiler->getLanguage()];
-
+
// Dynamically allocate the symbol table so we can control when it is deallocated WRT the pool.
TSymbolTable* symbolTableMemory = new TSymbolTable;
TSymbolTable& symbolTable = *symbolTableMemory;
if (cachedTable)
symbolTable.adoptLevels(*cachedTable);
-
+
// Add built-in symbols that are potentially context dependent;
// they get popped again further down.
AddContextSpecificSymbols(resources, compiler->infoSink, symbolTable, version, profile, spvVersion,
compiler->getLanguage(), source);
-
+
//
// Now we can process the full shader under proper symbols and rules.
//
}
parseContext->initializeExtensionBehavior();
-
+
// Fill in the strings as outlined above.
std::string preamble;
parseContext->getPreamble(preamble);
bool operator()(TParseContextBase& parseContext, TPpContext& ppContext,
TInputScanner& fullInput, bool versionWillBeError,
TSymbolTable&, TIntermediate& intermediate,
- EShOptimizationLevel optLevel, EShMessages messages)
+ EShOptimizationLevel optLevel, EShMessages messages)
{
bool success = true;
// Parse the full shader.
false, includer);
}
-
//
// do a partial compile on the given strings for a single compilation unit
// for a potential deferred link into a single stage (and deferred full compile of that
// all preprocessing, parsing, semantic checks, etc. for a single compilation unit
// are done here.
//
-// return: the tree and other information is filled into the intermediate argument,
+// return: the tree and other information is filled into the intermediate argument,
// and true is returned by the function for success.
//
bool CompileDeferred(
} // end anonymous namespace for local functions
-
//
// ShInitialize() should be called exactly once per process, not per thread.
//
return 0;
TShHandleBase* base = static_cast<TShHandleBase*>(ConstructCompiler(language, debugOptions));
-
+
return reinterpret_cast<void*>(base);
}
}
if (base->getAsCompiler())
cObjects.push_back(base->getAsCompiler());
-
-
+
if (cObjects[i] == 0)
return 0;
}
for (int i = 0; i < numHandles; ++i) {
if (cObjects[i]->getAsCompiler()) {
if (! cObjects[i]->getAsCompiler()->linkable()) {
- linker->infoSink.info.message(EPrefixError, "Not all shaders have valid object code.");
+ linker->infoSink.info.message(EPrefixError, "Not all shaders have valid object code.");
return 0;
}
}
return 0;
TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
-
+
TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
if (linker == 0)
return 0;
// success or failure.
//
int ShSetVirtualAttributeBindings(const ShHandle handle, const ShBindingTable* table)
-{
+{
if (!InitThread())
return 0;
if (linker == 0)
return 0;
-
+
linker->setAppAttributeBindings(table);
return 1;
//
// Below is a new alternate C++ interface that might potentially replace the above
// opaque handle-based interface.
-//
+//
// See more detailed comment in ShaderLang.h
//
{
if (! InitThread())
return false;
-
+
pool = new TPoolAllocator();
SetThreadPoolAllocator(*pool);
if (! preamble)
linked = true;
bool error = false;
-
+
pool = new TPoolAllocator();
SetThreadPoolAllocator(*pool);
//
bool TProgram::buildReflection()
-{
+{
if (! linked || reflection)
return false;
const TString *name;
unsigned int uniqueId; // For cross-scope comparing during code generation
- // For tracking what extensions must be present
+ // For tracking what extensions must be present
// (don't use if correct version/profile is present).
int numExtensions;
const char** extensions; // an array of pointers to existing constant char strings
class TVariable : public TSymbol {
public:
TVariable(const TString *name, const TType& t, bool uT = false )
- : TSymbol(name),
+ : TSymbol(name),
userType(uT),
constSubtree(nullptr),
anonId(-1) { type.shallowCopy(t); }
TString *name;
TType* type;
TIntermTyped* defaultValue;
- void copyParam(const TParameter& param)
+ void copyParam(const TParameter& param)
{
if (param.name)
name = NewPoolTString(param.name->c_str());
virtual const TAnonMember* getAsAnonMember() const { return this; }
virtual const TVariable& getAnonContainer() const { return anonContainer; }
virtual unsigned int getMemberNumber() const { return memberNumber; }
-
+
virtual const TType& getType() const
{
const TTypeList& types = *anonContainer.getType().getStruct();
const TTypeList& types = *anonContainer.getType().getStruct();
return *types[memberNumber].type;
}
-
+
virtual int getAnonId() const { return anonId; }
virtual void dump(TInfoSink &infoSink) const;
TSymbol* find(const TString& name) const
{
tLevel::const_iterator it = level.find(name);
- if (it == level.end())
+ if (it == level.end())
return 0;
else
return (*it).second;
while (table.size() > adoptedLevels)
pop(0);
}
-
+
void adoptLevels(TSymbolTable& symTable)
{
for (unsigned int level = 0; level < symTable.table.size(); ++level) {
void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; }
void setSeparateNameSpaces() { separateNameSpaces = true; }
-
+
void push()
{
table.push_back(new TSymbolTableLevel);
// make sure there isn't a function of this variable name
if (! separateNameSpaces && ! symbol.getAsFunction() && table[currentLevel()]->hasFunctionName(symbol.getName()))
return false;
-
+
// check for not overloading or redefining a built-in function
if (noBuiltInRedeclarations) {
if (atGlobalLevel() && currentLevel() > 0) {
//
// To allocate an internal temporary, which will need to be uniquely
- // identified by the consumer of the AST, but never need to
+ // identified by the consumer of the AST, but never need to be
// found by doing a symbol table search by name, hence allowed an
// arbitrary name in the symbol with no worry of collision.
//
for (unsigned int level = 0; level < table.size(); ++level)
table[level]->relateToOperator(name, op);
}
-
+
void setFunctionExtensions(const char* name, int num, const char* const extensions[])
{
for (unsigned int level = 0; level < table.size(); ++level)
//
// Help manage multiple profiles, versions, extensions etc.
//
-// These don't return error codes, as the presumption is parsing will
+// These don't return error codes, as the presumption is parsing will
// always continue as if the tested feature were enabled, and thus there
// is no error recovery needed.
//
//
// To add a new hypothetical "Feature F" to the front end, where an extension
// "XXX_extension_X" can be used to enable the feature, do the following.
-//
+//
// OVERVIEW: Specific features are what are error-checked for, not
-// extensions: A specific Feature F might be enabled by an extension, or a
+// extensions: A specific Feature F might be enabled by an extension, or a
// particular version in a particular profile, or a stage, or combinations, etc.
-//
-// The basic mechanism is to use the following to "declare" all the things that
+//
+// The basic mechanism is to use the following to "declare" all the things that
// enable/disable Feature F, in a code path that implements Feature F:
-//
+//
// requireProfile()
// profileRequires()
// requireStage()
// checkDeprecated()
// requireNotRemoved()
// requireExtensions()
-//
-// Typically, only the first two calls are needed. They go into a code path that
-// implements Feature F, and will log the proper error/warning messages. Parsing
+//
+// Typically, only the first two calls are needed. They go into a code path that
+// implements Feature F, and will log the proper error/warning messages. Parsing
// will then always continue as if the tested feature was enabled.
-//
+//
// There is typically no if-testing or conditional parsing, just insertion of the calls above.
// However, if symbols specific to the extension are added (step 5), they will
// only be added under tests that the minimum version and profile are present.
// 1) Add a symbol name for the extension string at the bottom of Versions.h:
//
// const char* const XXX_extension_X = "XXX_extension_X";
-//
+//
// 2) Add extension initialization to TParseVersions::initializeExtensionBehavior(),
// the first function below:
-//
+//
// extensionBehavior[XXX_extension_X] = EBhDisable;
//
// 3) Add any preprocessor directives etc. in the next function, TParseVersions::getPreamble():
// "#define XXX_extension_X 1\n"
//
// The new-line is important, as that ends preprocess tokens.
-//
+//
// 4) Insert a profile check in the feature's path (unless all profiles support the feature,
// for some version level). That is, call requireProfile() to constrain the profiles, e.g.:
-//
+//
// // ... in a path specific to Feature F...
// requireProfile(loc,
// ECoreProfile | ECompatibilityProfile,
// "Feature F");
-//
+//
// 5) For each profile that supports the feature, insert version/extension checks:
-//
+//
// The most likely scenario is that Feature F can only be used with a
// particular profile if XXX_extension_X is present or the version is
// high enough that the core specification already incorporated it.
-//
+//
// // following the requireProfile() call...
-// profileRequires(loc,
+// profileRequires(loc,
// ECoreProfile | ECompatibilityProfile,
// 420, // 0 if no version incorporated the feature into the core spec.
// XXX_extension_X, // can be a list of extensions that all add the feature
// "Feature F Description");
-//
+//
// This allows the feature if either A) one of the extensions is enabled or
// B) the version is high enough. If no version yet incorporates the feature
// into core, pass in 0.
-//
+//
// This can be called multiple times, if different profiles support the
-// feature starting at different version numbers or with different
+// feature starting at different version numbers or with different
// extensions.
-//
+//
// This must be called for each profile allowed by the initial call to requireProfile().
-//
+//
// Profiles are all masks, which can be "or"-ed together.
-//
+//
// ENoProfile
// ECoreProfile
// ECompatibilityProfile
// EEsProfile
-//
+//
// The ENoProfile profile is only for desktop, before profiles showed up in version 150;
// All other #version with no profile default to either es or core, and so have profiles.
-//
+//
// You can select all but a particular profile using ~. The following basically means "desktop":
-//
+//
// ~EEsProfile
//
// 6) If built-in symbols are added by the extension, add them in Initialize.cpp: Their use
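
Aside: steps 4 and 5 composed, as they would sit back to back in the code path for a hypothetical Feature F; the stubs exist only to make the sketch self-contained:

```cpp
#include <cstdio>

// Minimal stand-ins; in glslang these come from Versions.h and the
// TParseVersions interface.
enum { ECoreProfile = 1 << 1, ECompatibilityProfile = 1 << 2 };
struct TSourceLoc {};
const char* const XXX_extension_X = "XXX_extension_X";

void requireProfile(const TSourceLoc&, int /*profileMask*/, const char* feature)
    { std::printf("profile check for %s\n", feature); }
void profileRequires(const TSourceLoc&, int /*profileMask*/, int /*minVersion*/,
                     const char* /*extension*/, const char* feature)
    { std::printf("version/extension check for %s\n", feature); }

// Steps 4 and 5 from the guide above, in the path implementing Feature F:
void featureF(const TSourceLoc& loc)
{
    requireProfile(loc, ECoreProfile | ECompatibilityProfile, "Feature F");
    profileRequires(loc, ECoreProfile | ECompatibilityProfile,
                    420,              // first core version; 0 if none
                    XXX_extension_X,  // or a list of extensions
                    "Feature F");
}
```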
extensionBehavior[E_GL_EXT_frag_depth] = EBhDisable;
extensionBehavior[E_GL_OES_EGL_image_external] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_texture_lod] = EBhDisable;
-
+
extensionBehavior[E_GL_ARB_texture_rectangle] = EBhDisable;
extensionBehavior[E_GL_3DL_array_objects] = EBhDisable;
extensionBehavior[E_GL_ARB_shading_language_420pack] = EBhDisable;
extensionBehavior[E_GL_AMD_gpu_shader_half_float] = EBhDisable;
#endif
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable;
extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable;
#endif
void TParseVersions::getPreamble(std::string& preamble)
{
if (profile == EEsProfile) {
- preamble =
+ preamble =
"#define GL_ES 1\n"
"#define GL_FRAGMENT_PRECISION_HIGH 1\n"
"#define GL_OES_texture_3D 1\n"
"#define GL_EXT_shader_non_constant_global_initializers 1\n"
;
} else {
- preamble =
+ preamble =
"#define GL_FRAGMENT_PRECISION_HIGH 1\n"
"#define GL_ARB_texture_rectangle 1\n"
"#define GL_ARB_shading_language_420pack 1\n"
"#define GL_AMD_gpu_shader_half_float 1\n"
#endif
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
"#define GL_NV_sample_mask_override_coverage 1\n"
"#define GL_NV_geometry_shader_passthrough 1\n"
#endif
}
// #line and #include
- preamble +=
+ preamble +=
"#define GL_GOOGLE_cpp_style_line_directive 1\n"
"#define GL_GOOGLE_include_directive 1\n"
;
//
// When to use requireProfile():
//
-// Use if only some profiles support a feature. However, if within a profile the feature
+// Use if only some profiles support a feature. However, if within a profile the feature
// is version or extension specific, follow this call with calls to profileRequires().
//
// Operation: If the current profile is not one of the profileMask,
//
// Map from stage enum to externally readable text name.
//
-const char* StageName(EShLanguage stage)
+const char* StageName(EShLanguage stage)
{
switch(stage) {
case EShLangVertex: return "vertex";
// Operation: Will issue warnings/errors based on the current profile, version, and extension
// behaviors. It only checks extensions when the current profile is one of the profileMask.
//
-// A minVersion of 0 means no version of the profileMask support this in core,
+// A minVersion of 0 means no version of the profileMask supports this in core,
// the extension must be present.
//
//
// When to use requireStage()
//
-// If only some stages support a feature.
+// If only some stages support a feature.
//
// Operation: If the current stage is not present, give an error message.
//
// Call for any operation needing full GLSL integer data-type support.
void TParseVersions::fullIntegerCheck(const TSourceLoc& loc, const char* op)
{
- profileRequires(loc, ENoProfile, 130, nullptr, op);
+ profileRequires(loc, ENoProfile, 130, nullptr, op);
profileRequires(loc, EEsProfile, 300, nullptr, op);
}
// check binary operations for those modifying the loop index
bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
{
- if (node->modifiesState() && node->getLeft()->getAsSymbolNode() &&
+ if (node->modifiesState() && node->getLeft()->getAsSymbolNode() &&
node->getLeft()->getAsSymbolNode()->getId() == loopId) {
bad = true;
badLoc = node->getLoc();
// check unary operations for those modifying the loop index
bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
{
- if (node->modifiesState() && node->getOperand()->getAsSymbolNode() &&
+ if (node->modifiesState() && node->getOperand()->getAsSymbolNode() &&
node->getOperand()->getAsSymbolNode()->getId() == loopId) {
bad = true;
badLoc = node->getLoc();
//
// The "constant-index-expression" tranverser.
//
-// Just look at things that can form an index.
+// Just look at things that can form an index.
//
class TIndexTraverser : public TIntermTraverser {
#include "../Include/InfoSink.h"
namespace glslang {
-
+
//
// Link-time error emitter.
//
infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
}
-// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
-// name must have the exact same set of members qualified with offset and their integral-constant
+// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
+// name must have the exact same set of members qualified with offset and their integral-constant
// expression values must be the same, or a link-time error results."
//
inputPrimitive = unit.inputPrimitive;
else if (inputPrimitive != unit.inputPrimitive)
error(infoSink, "Contradictory input layout primitives");
-
+
if (outputPrimitive == ElgNone)
outputPrimitive = unit.outputPrimitive;
else if (outputPrimitive != unit.outputPrimitive)
error(infoSink, "Contradictory output layout primitives");
-
+
if (vertices == TQualifier::layoutNotSet)
vertices = unit.vertices;
else if (vertices != unit.vertices) {
}
// Getting this far means we have two existing trees to merge...
-
+
version = std::max(version, unit.version);
requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
writeTypeComparison = true;
}
- // Layouts...
- // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
- // requires separate user-supplied offset from actual computed offset, but
+ // Layouts...
+ // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
+ // requires separate user-supplied offset from actual computed offset, but
// current implementation only has one offset.
if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix ||
symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking ||
if (xfbBuffers[b].containsDouble)
RoundToPow2(xfbBuffers[b].implicitStride, 8);
- // "It is a compile-time or link-time error to have
+ // "It is a compile-time or link-time error to have
// any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
// in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
// compile-time or link-time error to have different values specified for the stride for the same buffer."
if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
xfbBuffers[b].stride = xfbBuffers[b].implicitStride;
- // "If the buffer is capturing any
- // outputs with double-precision components, the stride must be a multiple of 8, otherwise it must be a
+ // "If the buffer is capturing any
+ // outputs with double-precision components, the stride must be a multiple of 8, otherwise it must be a
// multiple of 4, or a compile-time or link-time error results."
if (xfbBuffers[b].containsDouble && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double:");
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
}
- // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
+ // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
error(infoSink, "xfb_stride is too large:");
break;
// Otherwise, we found a new subgraph, process it:
- // See what all can be reached by this new root, and if any of
+ // See what all can be reached by this new root, and if any of
// that is recursive. This is done by depth-first traversals, seeing
// if a new call is found that was already in the currentPath (a back edge),
// thereby detecting recursion.
}
// See if a variable was both a user-declared output and used.
-// Note: the spec discusses writing to one, but this looks at read or write, which
+// Note: the spec discusses writing to one, but this looks at read or write, which
// is more useful, and perhaps the spec should be changed to reflect that.
bool TIntermediate::userOutputUsed() const
{
const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
if (symbolNode.getQualifier().storage == EvqVaryingOut &&
symbolNode.getName().compare(0, 3, "gl_") != 0 &&
- inIoAccessed(symbolNode.getName())) {
+ inIoAccessed(symbolNode.getName())) {
found = true;
break;
}
// Return the size of type, as measured by "locations".
int TIntermediate::computeTypeLocationSize(const TType& type) const
{
- // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
+ // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
// consecutive locations..."
if (type.isArray()) {
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
return type.getOuterArraySize() * computeTypeLocationSize(elementType);
}
- // "The locations consumed by block and structure members are determined by applying the rules above
- // recursively..."
+ // "The locations consumed by block and structure members are determined by applying the rules above
+ // recursively..."
if (type.isStruct()) {
int size = 0;
for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
// ES: "If a shader input is any scalar or vector type, it will consume a single location."
- // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
- // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
- // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
+ // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
+ // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
+ // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
// consume only a single location, in all stages."
if (type.isScalar())
return 1;
}
// "If the declared input is an n x m single- or double-precision matrix, ...
- // The number of locations assigned for each matrix will be the same as
+ // The number of locations assigned for each matrix will be the same as
// for an n-element array of m-component vectors..."
if (type.isMatrix()) {
TType columnType(type, 0);
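
Aside: the quoted location rules for arrays, matrices, and scalars/vectors fit in a short recursion. A toy model (names invented; structs, which sum their members recursively, are omitted):

```cpp
struct ToyType {
    int  arraySize;   // 0 when not an array
    int  columns;     // 0 when not a matrix
    bool doubleWide;  // a dvec3/dvec4 input outside the vertex stage
};

int locationSize(const ToyType& t)
{
    if (t.arraySize > 0) {                 // arrays: m * n locations
        ToyType element = t;
        element.arraySize = 0;
        return t.arraySize * locationSize(element);
    }
    if (t.columns > 0) {                   // matrix: array of column vectors
        ToyType column = t;
        column.columns = 0;
        return t.columns * locationSize(column);
    }
    return t.doubleWide ? 2 : 1;           // scalars/vectors take 1 (or 2)
}
```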
// N.B. Caller must set containsDouble to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& containsDouble) const
{
- // "...if applied to an aggregate containing a double, the offset must also be a multiple of 8,
+ // "...if applied to an aggregate containing a double, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8.
- // ...within the qualified entity, subsequent components are each
+ // ...within the qualified entity, subsequent components are each
// assigned, in order, to the next available offset aligned to a multiple of
// that component's size. Aggregate types are flattened down to the component
// level to get this sequence of components."
- if (type.isArray()) {
+ if (type.isArray()) {
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
assert(type.isExplicitlySizedArray());
TType elementType(type, 0);
bool structContainsDouble = false;
for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
TType memberType(type, member);
- // "... if applied to
- // an aggregate containing a double, the offset must also be a multiple of 8,
+ // "... if applied to
+ // an aggregate containing a double, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8."
bool memberContainsDouble = false;
int memberSize = computeTypeXfbSize(memberType, memberContainsDouble);
// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
// Operates recursively.
//
-// If std140 is true, it does the rounding up to vec4 size required by std140,
+// If std140 is true, it does the rounding up to vec4 size required by std140,
// otherwise it does not, yielding std430 rules.
//
// The size is returned in the 'size' parameter
//
// 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
//
- // 2. If the member is a two- or four-component vector with components consuming N basic
+ // 2. If the member is a two- or four-component vector with components consuming N basic
// machine units, the base alignment is 2N or 4N, respectively.
//
// 3. If the member is a three-component vector with components consuming N
// the array is rounded up to the next multiple of the base alignment.
//
// 5. If the member is a column-major matrix with C columns and R rows, the
- // matrix is stored identically to an array of C column vectors with R
+ // matrix is stored identically to an array of C column vectors with R
// components each, according to rule (4).
//
// 6. If the member is an array of S column-major matrices with C columns and
//
// 9. If the member is a structure, the base alignment of the structure is N , where
// N is the largest base alignment value of any of its members, and rounded
- // up to the base alignment of a vec4. The individual members of this substructure
+ // up to the base alignment of a vec4. The individual members of this substructure
// are then assigned offsets by applying this set of rules recursively,
// where the base offset of the first member of the sub-structure is equal to the
// aligned offset of the structure. The structure may have padding at the end;
int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, std140,
(subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
maxAlignment = std::max(maxAlignment, memberAlignment);
- RoundToPow2(size, memberAlignment);
+ RoundToPow2(size, memberAlignment);
size += memberSize;
}
case 2:
size *= 2;
return 2 * scalarAlign;
- default:
+ default:
size *= type.getVectorSize();
return 4 * scalarAlign;
}
if (type.isMatrix()) {
// rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
TType derefType(type, 0, rowMajor);
-
+
alignment = getBaseAlignment(derefType, size, dummyStride, std140, rowMajor);
if (std140)
alignment = std::max(baseAlignmentVec4Std140, alignment);
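
Aside: rules 1-3 for scalar and vector base alignment, restated standalone; note the three-component case deliberately rounds up to vec4 alignment, matching the switch shown above:

```cpp
// N for scalars, 2N for two-component vectors, and 4N for three- and
// four-component vectors, where N is the scalar's alignment.
int vectorBaseAlignment(int scalarAlign, int vectorSize)
{
    switch (vectorSize) {
    case 1:  return scalarAlign;
    case 2:  return 2 * scalarAlign;
    default: return 4 * scalarAlign;   // 3- and 4-component vectors
    }
}
```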
// A generic 1-D range.
struct TRange {
TRange(int start, int last) : start(start), last(last) { }
- bool overlap(const TRange& rhs) const
+ bool overlap(const TRange& rhs) const
{
return last >= rhs.start && start <= rhs.last;
}
shiftUboBinding(0),
autoMapBindings(false),
flattenUniformArrays(false),
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
layoutOverrideCoverage(false),
geoPassthroughEXT(false),
#endif
bool getFlattenUniformArrays() const { return flattenUniformArrays; }
void setNoStorageFormat(bool b) { useUnknownFormat = b; }
bool getNoStorageFormat() const { return useUnknownFormat; }
-
+
void setVersion(int v) { version = v; }
int getVersion() const { return version; }
void setProfile(EProfile p) { profile = p; }
void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
- bool setInvocations(int i)
+ bool setInvocations(int i)
{
if (invocations != TQualifier::layoutNotSet)
return invocations == i;
TVertexOrder getVertexOrder() const { return vertexOrder; }
void setPointMode() { pointMode = true; }
bool getPointMode() const { return pointMode; }
-
+
bool setLocalSize(int dim, int size)
{
if (localSize[dim] > 1)
static int getBaseAlignment(const TType&, int& size, int& stride, bool std140, bool rowMajor);
bool promote(TIntermOperator*);
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
bool promoteBinary(TIntermBinary&);
void addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable&, const TString&);
bool promoteAggregate(TIntermAggregate&);
-
+
const EShLanguage language; // stage, known at construction time
EShSource source; // source language, known a bit later
std::string entryPointName;
bool xfbMode;
bool multiStream;
-#ifdef NV_EXTENSIONS
+#ifdef NV_EXTENSIONS
bool layoutOverrideCoverage;
bool geoPassthroughEXT;
#endif
//
//
-// Travarse a tree of constants to create a single folded constant.
+// Traverse a tree of constants to create a single folded constant.
// It should only be used when the whole tree is known to be constant.
//
public:
TConstTraverser(const TConstUnionArray& cUnion, bool singleConstParam, TOperator constructType, const TType& t)
: unionArray(cUnion), type(t),
- constructorType(constructType), singleConstantParam(singleConstParam), error(false), isMatrix(false),
+ constructorType(constructType), singleConstantParam(singleConstParam), error(false), isMatrix(false),
matrixCols(0), matrixRows(0) { index = 0; tOp = EOpNull; }
virtual void visitConstantUnion(TIntermConstantUnion* node);
if (! node->isConstructor() && node->getOp() != EOpComma) {
error = true;
- return false;
+ return false;
}
if (node->getSequence().size() == 0) {
bool flag = node->getSequence().size() == 1 && node->getSequence()[0]->getAsTyped()->getAsConstantUnion();
if (flag) {
- singleConstantParam = true;
+ singleConstantParam = true;
constructorType = node->getOp();
size = node->getType().computeNumComponents();
matrixCols = node->getType().getMatrixCols();
matrixRows = node->getType().getMatrixRows();
}
- }
+ }
- for (TIntermSequence::iterator p = node->getSequence().begin();
+ for (TIntermSequence::iterator p = node->getSequence().begin();
p != node->getSequence().end(); p++) {
if (node->getOp() == EOpComma)
- index = 0;
+ index = 0;
(*p)->traverse(this);
- }
- if (flag)
+ }
+ if (flag)
{
- singleConstantParam = false;
+ singleConstantParam = false;
constructorType = EOpNull;
size = 0;
isMatrix = false;
if (! singleConstantParam) {
int rightUnionSize = node->getType().computeNumComponents();
-
+
const TConstUnionArray& rightUnionArray = node->getConstArray();
for (int i = 0; i < rightUnionSize; i++) {
if (index >= instanceSize)
leftUnionArray[i] = rightUnionArray[count];
(index)++;
-
+
if (nodeComps > 1)
count++;
}
return;
if (i == startIndex || (i - startIndex) % (matrixRows + 1) == 0 )
leftUnionArray[i] = rightUnionArray[count];
- else
+ else
leftUnionArray[i].setDConst(0.0);
index++;
if (nodeComps > 1)
- count++;
+ count++;
}
}
}
return false;
TConstTraverser it(unionArray, singleConstantParam, constructorType, t);
-
+
root->traverse(&it);
if (it.error)
return true;
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
mac.emptyArgs = 1;
do {
token = scanToken(ppToken);
- if (mac.args.size() == 0 && token == ')')
+ if (mac.args.size() == 0 && token == ')')
break;
if (token != PpAtomIdentifier) {
parseContext.ppError(ppToken->loc, "bad argument", "#define", "");
newToken = ReadToken(mac.body, &newPpToken);
if (oldToken != newToken || oldPpToken != newPpToken) {
parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", atomStrings.getString(defAtom));
- break;
+ break;
}
} while (newToken > 0);
}
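For reference, the token-by-token comparison above is what catches redefinitions whose bodies differ (hypothetical input):

    #define SIZE (4)
    #define SIZE (4)    // accepted: the substitution is identical
    #define SIZE (8)    // error: Macro redefined; different substitutions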
int nextAtom = atomStrings.getAtom(ppToken->name);
if (nextAtom == PpAtomIf || nextAtom == PpAtomIfdef || nextAtom == PpAtomIfndef) {
- depth++;
- ifdepth++;
+ depth++;
+ ifdepth++;
elsetracker++;
} else if (nextAtom == PpAtomEndif) {
token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken));
--elsetracker;
if (depth == 0) {
// found the #endif we are looking for
- if (ifdepth)
+ if (ifdepth)
--ifdepth;
break;
}
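A minimal sketch of why the depth counter matters while scanning a skipped group for its matching #endif (hypothetical input):

    #if 0        // group being skipped; scan forward for the matching #endif
    #ifdef A     // nested conditional: depth goes 0 -> 1
    #endif       // depth goes 1 -> 0: not ours
    #endif       // reached with depth == 0: the #endif we are looking for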
// Perform evaluation of binary operation, if there is one, otherwise we are done.
while (! err) {
- if (token == ')' || token == '\n')
+ if (token == ')' || token == '\n')
break;
int op;
for (op = NUM_ELEMENTS(binop) - 1; op >= 0; op--) {
}
// Handle #if
-int TPpContext::CPPif(TPpToken* ppToken)
+int TPpContext::CPPif(TPpToken* ppToken)
{
int token = scanToken(ppToken);
elsetracker++;
if (token != PpAtomIdentifier) {
if (defined)
parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifdef", "");
- else
+ else
parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifndef", "");
} else {
MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name));
epilogue << (res->file_data[res->file_length - 1] == '\n'? "" : "\n") <<
"#line " << directiveLoc.line + forNextLine << " " << directiveLoc.getStringNameOrNum() << "\n";
pushInput(new TokenizableIncludeFile(directiveLoc, prologue.str(), res, epilogue.str(), this));
+ // There's no "current" location anymore.
parseContext.setCurrentColumn(0);
} else {
// things are okay, but there is nothing to process
}
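Concretely (assuming the #include sits on line 10 of source string 0 and forNextLine is 1), the pushed input is roughly:

    // <prologue>, then the included file's text ...
    // #line 11 0     <- synthesized epilogue: resume just past the #include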
// Handle #line
-int TPpContext::CPPline(TPpToken* ppToken)
+int TPpContext::CPPline(TPpToken* ppToken)
{
// "#line must have, after macro substitution, one of the following forms:
// "#line line
}
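Both accepted forms, for reference (standard preprocessor behavior):

    #line 42        // the next line is reported as line 42
    #line 42 3      // ...and subsequent locations also report source string 3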
// Handle #error
-int TPpContext::CPPerror(TPpToken* ppToken)
+int TPpContext::CPPerror(TPpToken* ppToken)
{
int token = scanToken(ppToken);
std::string message;
if (elseSeen[elsetracker])
parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", "");
// this token is really a don't-care, but we still need to eat the tokens
- token = scanToken(ppToken);
+ token = scanToken(ppToken);
while (token != '\n' && token != EndOfInput)
token = scanToken(ppToken);
token = CPPelse(0, ppToken);
return expandedArg;
}
-//
+//
// Return the next token for a macro expansion, handling macro arguments,
// whose semantics are dependent on being adjacent to ##.
//
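The adjacency matters because, per the usual preprocessor rule, an argument that touches ## is pasted as-is rather than being macro-expanded first (hypothetical input):

    #define ONE 1
    #define CAT(a, b) a ## b
    CAT(ONE, 2)     // pastes to the single token ONE2; ONE is not expanded first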
// Check a token to see if it is a macro that should be expanded.
// If it is, and defined, push a tInput that will produce the appropriate expansion
// and return 1.
-// If it is, but undefined, and expandUndef is requested, push a tInput that will
+// If it is, but undefined, and expandUndef is requested, push a tInput that will
// expand to 0 and return -1.
// Otherwise, return 0 to indicate no expansion, which is not necessarily an error.
//
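The expandUndef path implements the usual #if rule that an undefined identifier behaves as the literal 0 (hypothetical input):

    // FOO is defined nowhere:
    #if FOO + 1     // FOO expands to 0, so the condition is 0 + 1 and the group is taken
    #endif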
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
namespace glslang {
-TPpContext::TPpContext(TParseContextBase& pc, const std::string& rootFileName, TShader::Includer& inclr) :
+TPpContext::TPpContext(TParseContextBase& pc, const std::string& rootFileName, TShader::Includer& inclr) :
preamble(0), strings(0), previous_token('\n'), parseContext(pc), includer(inclr), inComment(false),
rootFileName(rootFileName),
currentSourceFile(rootFileName)
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
public:
TPpToken() : space(false), ival(0), dval(0.0), i64val(0)
{
- loc.init();
+ loc.init();
name[0] = 0;
}
static const int maxIfNesting = 64;
- int ifdepth; // current #if-#else-#endif nesting in the cpp.c file (pre-processor)
+ int ifdepth; // current #if-#else-#endif nesting in the cpp.c file (pre-processor)
    bool elseSeen[maxIfNesting]; // keep track of whether an else has been seen at a particular depth
    int elsetracker; // counter for #if-#else-#endif constructs
int extraTokenCheck(int atom, TPpToken* ppToken, int token);
int eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
int evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
- int CPPif (TPpToken * ppToken);
+ int CPPif (TPpToken * ppToken);
int CPPifdef(int defined, TPpToken * ppToken);
int CPPinclude(TPpToken * ppToken);
- int CPPline(TPpToken * ppToken);
- int CPPerror(TPpToken * ppToken);
+ int CPPline(TPpToken * ppToken);
+ int CPPerror(TPpToken * ppToken);
int CPPpragma(TPpToken * ppToken);
int CPPversion(TPpToken * ppToken);
int CPPextension(TPpToken * ppToken);
int ReadToken(TokenStream&, TPpToken*);
void pushTokenStreamInput(TokenStream&, bool pasting = false);
void UngetToken(int token, TPpToken*);
-
+
class tTokenInput : public tInput {
public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
return '\\';
} while (ch == '\\');
}
-
+
// handle any non-escaped newline
if (ch == '\r' || ch == '\n') {
if (ch == '\r' && input->peek() == '\n')
TInputScanner* input;
};
- // Holds a reference to included file data, as well as a
+ // Holds a reference to included file data, as well as a
// prologue and an epilogue string. This can be scanned using the tInput
// interface and acts as a single source string.
class TokenizableIncludeFile : public tInput {
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
parseContext.ppError(ppToken->loc, "float literal too long", "", "");
len = 1,str_len=1;
}
- } else
+ } else
ungetChar();
str[len]='\0';
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
-incorporated. No hardware is licensed hereunder.
+incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
#endif
PpAtomConstString,
- // Identifiers
+ // Identifiers
PpAtomIdentifier,
// preprocessor "keywords"
// there wasn't exactly one entry point.
//
-
namespace glslang {
//
return blockIndex;
}
-
// Are we at a level in a dereference chain at which individual active uniform queries are made?
bool isReflectionGranularity(const TType& type)
{
addAttribute(*base);
}
-
//
// Implement TReflection methods.
//
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
- TObjectReflection(const TString& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex) :
+ TObjectReflection(const TString& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex) :
name(pName), type(pType.clone()),
offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex) { }
// for mapping a block index to the block's description
int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); }
- const TObjectReflection& getUniformBlock(int i) const
+ const TObjectReflection& getUniformBlock(int i) const
{
if (i >= 0 && i < (int)indexToUniformBlock.size())
return indexToUniformBlock[i];
}
// for mapping any name to its index (block names, uniform names and attribute names)
- int getIndex(const char* name) const
+ int getIndex(const char* name) const
{
TNameToIndex::const_iterator it = nameToIndex.find(name);
if (it == nameToIndex.end())
//
//
-// Wrapper for Linux call to DetachThread. This is required as pthread_cleanup_push() expects
+// Wrapper for Linux call to DetachThread. This is required as pthread_cleanup_push() expects
// the cleanup routine to return void.
-//
+//
static void DetachThreadLinux(void *)
{
- DetachThread();
+ DetachThread();
}
-
//
// Registers cleanup handler, sets cancel type and state, and executes the thread specific
-// cleanup handler. This function will be called in the Standalone.cpp for regression
-// testing. When OpenGL applications are run with the driver code, Linux OS does the
+// cleanup handler. This function will be called in the Standalone.cpp for regression
+// testing. When OpenGL applications are run with the driver code, Linux OS does the
// thread cleanup.
-//
+//
void OS_CleanupThreadData(void)
{
#ifdef __ANDROID__
- DetachThreadLinux(NULL);
+ DetachThreadLinux(NULL);
#else
- int old_cancel_state, old_cancel_type;
- void *cleanupArg = NULL;
-
- //
- // Set thread cancel state and push cleanup handler.
- //
- pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancel_state);
- pthread_cleanup_push(DetachThreadLinux, (void *) cleanupArg);
-
- //
- // Put the thread in deferred cancellation mode.
- //
- pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_cancel_type);
-
- //
- // Pop cleanup handler and execute it prior to unregistering the cleanup handler.
- //
- pthread_cleanup_pop(1);
-
- //
- // Restore the thread's previous cancellation mode.
- //
- pthread_setcanceltype(old_cancel_state, NULL);
+ int old_cancel_state, old_cancel_type;
+ void *cleanupArg = NULL;
+
+ //
+ // Set thread cancel state and push cleanup handler.
+ //
+ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancel_state);
+ pthread_cleanup_push(DetachThreadLinux, (void *) cleanupArg);
+
+ //
+ // Put the thread in deferred cancellation mode.
+ //
+ pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_cancel_type);
+
+ //
+ // Pop cleanup handler and execute it prior to unregistering the cleanup handler.
+ //
+ pthread_cleanup_pop(1);
+
+ //
+ // Restore the thread's previous cancellation mode.
+ //
+    pthread_setcanceltype(old_cancel_type, NULL);
#endif
}
-
//
// Thread Local Storage Operations
//
inline OS_TLSIndex PthreadKeyToTLSIndex(pthread_key_t key)
{
- return (OS_TLSIndex)((uintptr_t)key + 1);
+ return (OS_TLSIndex)((uintptr_t)key + 1);
}
inline pthread_key_t TLSIndexToPthreadKey(OS_TLSIndex nIndex)
{
- return (pthread_key_t)((uintptr_t)nIndex - 1);
+ return (pthread_key_t)((uintptr_t)nIndex - 1);
}
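A note on the ±1 bias in the two helpers above: assuming OS_INVALID_TLS_INDEX is defined as 0 (the bias only makes sense if it is), it keeps a perfectly valid pthread_key_t of 0 from colliding with the invalid-index sentinel:

    pthread_key_t key = 0;                        // a legitimate key value
    OS_TLSIndex idx = PthreadKeyToTLSIndex(key);  // maps to 1, not OS_INVALID_TLS_INDEX
    assert(TLSIndexToPthreadKey(idx) == key);     // and the mapping round-trips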
OS_TLSIndex OS_AllocTLSIndex()
{
- pthread_key_t pPoolIndex;
+ pthread_key_t pPoolIndex;
- //
- // Create global pool key.
- //
- if ((pthread_key_create(&pPoolIndex, NULL)) != 0) {
- assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
- return OS_INVALID_TLS_INDEX;
- }
- else
- return PthreadKeyToTLSIndex(pPoolIndex);
+ //
+ // Create global pool key.
+ //
+ if ((pthread_key_create(&pPoolIndex, NULL)) != 0) {
+ assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
+ return OS_INVALID_TLS_INDEX;
+ }
+ else
+ return PthreadKeyToTLSIndex(pPoolIndex);
}
-
bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
{
- if (nIndex == OS_INVALID_TLS_INDEX) {
- assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
- return false;
- }
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+ assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
+ return false;
+ }
- if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0)
- return true;
- else
- return false;
+ if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0)
+ return true;
+ else
+ return false;
}
void* OS_GetTLSValue(OS_TLSIndex nIndex)
{
- //
- // This function should return 0 if nIndex is invalid.
- //
- assert(nIndex != OS_INVALID_TLS_INDEX);
- return pthread_getspecific(TLSIndexToPthreadKey(nIndex));
+ //
+ // This function should return 0 if nIndex is invalid.
+ //
+ assert(nIndex != OS_INVALID_TLS_INDEX);
+ return pthread_getspecific(TLSIndexToPthreadKey(nIndex));
}
bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
{
- if (nIndex == OS_INVALID_TLS_INDEX) {
- assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
- return false;
- }
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+    assert(0 && "OS_FreeTLSIndex(): Invalid TLS Index");
+ return false;
+ }
- //
- // Delete the global pool key.
- //
- if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0)
- return true;
- else
- return false;
+ //
+ // Delete the global pool key.
+ //
+ if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0)
+ return true;
+ else
+ return false;
}
namespace {
-pthread_mutex_t gMutex;
+ pthread_mutex_t gMutex;
}
void InitGlobalLock()
return ToGenericTLSIndex(dwIndex);
}
-
bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
{
if (nIndex == OS_INVALID_TLS_INDEX) {
//
// ShHandle held by but opaque to the driver. It is allocated,
-// managed, and de-allocated by the compiler/linker. It's contents
+// managed, and de-allocated by the compiler/linker. Its contents
// are defined by and used by the compiler and linker. For example,
-// symbol table information and object code passed from the compiler
+// symbol table information and object code passed from the compiler
// to the linker can be stored where ShHandle points.
//
// If handle creation fails, 0 will be returned.
// The return value of ShCompile is boolean, non-zero indicating
// success.
//
-// The info-log should be written by ShCompile into
+// The info-log should be written by ShCompile into
// ShHandle, so it can answer future queries.
//
SH_IMPORT_EXPORT int ShCompile(
// -----------------------------------
//
// Below is a new alternate C++ interface that might potentially replace the above
-// opaque handle-based interface.
-//
+// opaque handle-based interface.
+//
// The below is further designed to handle multiple compilation units per stage, where
// the intermediate results, including the parse tree, are preserved until link time,
// rather than the above interface which is designed to have each compilation unit
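A minimal sketch of driving that C++ interface (standard glslang usage; 'resources' and 'shaderSource' are client-provided, and error handling is elided):

    glslang::TShader shader(EShLangFragment);
    shader.setStrings(&shaderSource, 1);                  // one compilation unit
    shader.parse(&resources, 100, false, EShMsgDefault);  // parse now; the AST is kept
    glslang::TProgram program;
    program.addShader(&shader);                           // more units per stage may be added
    program.link(EShMsgDefault);                          // cross-unit work happens at link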
const char *getAttributeName(int index) const; // can be used for glGetActiveAttrib()
int getAttributeType(int index) const; // can be used for glGetActiveAttrib()
const TType* getUniformTType(int index) const; // returns a TType*
- const TType* getUniformBlockTType(int index) const; // returns a TType*
+ const TType* getUniformBlockTType(int index) const; // returns a TType*
const TType* getAttributeTType(int index) const; // returns a TType*
void dumpReflection();
if (attr != EatNone)
attributes[attr] = value;
-
+
return attr;
}
return (entry == attributes.end()) ? nullptr : entry->second;
}
-
+
} // end namespace glslang
// they ARE still accepted as identifiers. This is not a dense space: e.g, "void" is not a
// valid identifier, nor is "linear". This code special cases the known instances of this, so
// e.g, "int sample;" or "float float;" is accepted. Other cases can be added here if needed.
-
+
TString* idString = nullptr;
switch (peek()) {
case EHTokSample: idString = NewPoolTString("sample"); break;
}
// sampler_state
-// : LEFT_BRACE [sampler_state_assignment ... ] RIGHT_BRACE
+// : LEFT_BRACE [sampler_state_assignment ... ] RIGHT_BRACE
//
// sampler_state_assignment
// : sampler_state_identifier EQUAL value SEMICOLON
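For reference, the construct this grammar describes looks like the following in HLSL source (hypothetical effect-style input, shown as a comment since it is not C++):

    // SamplerState s0
    // {
    //     Filter = MIN_MAG_MIP_LINEAR;   // sampler_state_assignment
    //     AddressU = WRAP;               // sampler_state_assignment
    // };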
return true;
parseContext.warn(token.loc, "unimplemented", "immediate sampler state", "");
-
+
do {
// read state name
HlslToken state;
{
if (! acceptTokenClass(EHTokSampler))
return false;
-
+
// TODO: remove this when DX9 style declarations are implemented.
unimplemented("Direct3D 9 sampler declaration");
return false;
}
-
// declaration
// : sampler_declaration_dx9 post_decls SEMICOLON
// | fully_specified_type declarator_list SEMICOLON
// be possible to simultaneously compile D3D10+ style shaders and DX9 shaders. If we want to compile DX9
// HLSL shaders, this will have to be a master level switch
// As such, the sampler keyword in D3D10+ turns into an automatic sampler type, and is commonly used
- // For that reason, this line is commented out
+ // For that reason, this line is commented out
// if (acceptSamplerDeclarationDX9(declaredType))
// return true;
if (declaredType.getQualifier().storage == EvqTemporary && parseContext.symbolTable.atGlobalLevel())
declaredType.getQualifier().storage = EvqUniform;
- // We can handle multiple variables per type declaration, so
+ // We can handle multiple variables per type declaration, so
// the number of types can expand when arrayness is different.
TType variableType;
variableType.shallowCopy(declaredType);
expected(";");
return false;
}
-
+
return true;
}
qualifier.storage = EvqIn;
if (!parseContext.handleInputGeometry(token.loc, ElgLinesAdjacency))
return false;
- break;
+ break;
case EHTokTriangleAdj:
qualifier.storage = EvqIn;
if (!parseContext.handleInputGeometry(token.loc, ElgTrianglesAdjacency))
return false;
- break;
-
+ break;
+
default:
return true;
}
expected(",");
return false;
}
-
+
// integer cols
if (! peekTokenClass(EHTokIntConstant)) {
expected("literal integer");
if (! acceptTokenClass(EHTokLeftAngle))
return false;
-
+
if (! acceptType(type)) {
expected("stream output type");
return false;
return true;
}
-
+
// annotations
// : LEFT_ANGLE declaration SEMI_COLON ... declaration SEMICOLON RIGHT_ANGLE
//
case EHTokTexture1darray: dim = Esd1D; array = true; break;
case EHTokTexture2d: dim = Esd2D; break;
case EHTokTexture2darray: dim = Esd2D; array = true; break;
- case EHTokTexture3d: dim = Esd3D; break;
+ case EHTokTexture3d: dim = Esd3D; break;
case EHTokTextureCube: dim = EsdCube; break;
case EHTokTextureCubearray: dim = EsdCube; array = true; break;
case EHTokTexture2DMS: dim = Esd2D; ms = true; break;
advanceToken(); // consume the texture object keyword
TType txType(EbtFloat, EvqUniform, 4); // default type is float4
-
+
TIntermTyped* msCount = nullptr;
// texture type: required for multisample types and RWBuffer/RWTextures!
// Remember the declared vector size.
sampler.vectorSize = txType.getVectorSize();
-
+
type.shallowCopy(TType(sampler, EvqUniform, arraySizes));
type.getQualifier().layoutFormat = format;
return true;
}
-
// If token is for a type, update 'type' with the type information,
// and return true and advance.
// Otherwise, return false, and don't advance
if (! parseContext.handleOutputGeometry(token.loc, geometry))
return false;
-
+
return true;
}
new(&type) TType(EbtUint, EvqTemporary, 4);
break;
-
case EHTokBool:
new(&type) TType(EbtBool);
break;
case EHTokHalf4:
new(&type) TType(half_bt, EvqTemporary, EpqMedium, 4);
break;
-
+
case EHTokMin16float:
new(&type) TType(min16float_bt, EvqTemporary, EpqMedium);
break;
case EHTokMin16float4:
new(&type) TType(min16float_bt, EvqTemporary, EpqMedium, 4);
break;
-
+
case EHTokMin10float:
new(&type) TType(min10float_bt, EvqTemporary, EpqMedium);
break;
case EHTokMin10float4:
new(&type) TType(min10float_bt, EvqTemporary, EpqMedium, 4);
break;
-
+
case EHTokMin16int:
new(&type) TType(min16int_bt, EvqTemporary, EpqMedium);
break;
case EHTokMin16int4:
new(&type) TType(min16int_bt, EvqTemporary, EpqMedium, 4);
break;
-
+
case EHTokMin12int:
new(&type) TType(min12int_bt, EvqTemporary, EpqMedium);
break;
case EHTokMin12int4:
new(&type) TType(min12int_bt, EvqTemporary, EpqMedium, 4);
break;
-
+
case EHTokMin16uint:
new(&type) TType(min16uint_bt, EvqTemporary, EpqMedium);
break;
return true;
}
-
// default_parameter_declaration
// : EQUAL conditional_expression
// : EQUAL initializer
return false;
TIntermTyped* arguments = nullptr;
- for (int i=0; i<int(node->getAsAggregate()->getSequence().size()); i++)
+ for (int i = 0; i < int(node->getAsAggregate()->getSequence().size()); i++)
parseContext.handleFunctionArgument(constructor, arguments, node->getAsAggregate()->getSequence()[i]->getAsTyped());
-
+
node = parseContext.handleFunctionCall(token.loc, constructor, node);
}
// peek for "op unary_expression"
TOperator unaryOp = HlslOpMap::preUnary(peek());
-
+
// postfix_expression (if no unary operator)
if (unaryOp == EOpNull)
return acceptPostfixExpression(node);
return false;
}
} else if (acceptLiteral(node)) {
- // literal (nothing else to do yet), go on to the
+ // literal (nothing else to do yet), go on to the
} else if (acceptConstructor(node)) {
// constructor (nothing else to do yet)
} else if (acceptIdentifier(idToken)) {
TIntermTyped* node;
bool expectingExpression = false;
-
+
while (acceptAssignmentExpression(node)) {
expectingExpression = false;
expressions->getSequence().push_back(node);
// SEMICOLON
if (! acceptTokenClass(EHTokSemicolon))
expected(";");
-
+
return true;
}
void HlslGrammar::acceptPostDecls(TQualifier& qualifier)
{
do {
- // COLON
+ // COLON
if (acceptTokenClass(EHTokColon)) {
HlslToken idToken;
if (peekTokenClass(EHTokLayout))
namespace glslang {
class TAttributeMap; // forward declare
-
+
// Should just be the grammar aspect of HLSL.
// Described in more detail in hlslGrammar.cpp.
case EHTokDash: return EOpNegative;
case EHTokBang: return EOpLogicalNot;
case EHTokTilde: return EOpBitwiseNot;
-
+
case EHTokIncOp: return EOpPreIncrement;
case EHTokDecOp: return EOpPreDecrement;
switch (op) {
case EHTokDot: return EOpIndexDirectStruct;
case EHTokLeftBracket: return EOpIndexIndirect;
-
+
case EHTokIncOp: return EOpPostIncrement;
case EHTokDecOp: return EOpPostDecrement;
globalInputDefaults.clear();
globalOutputDefaults.clear();
- // "Shaders in the transform
+ // "Shaders in the transform
// feedback capturing mode have an initial global default of
// layout(xfb_buffer = 0) out;"
if (language == EShLangVertex ||
TIntermAggregate* lhsAsAggregate = node->getAsAggregate();
TIntermTyped* object = lhsAsAggregate->getSequence()[0]->getAsTyped();
-
+
if (!object->getType().getSampler().isImage()) {
error(loc, "operator[] on a non-RW texture must be an r-value", "", "");
return true;
// series of other image operations.
//
// Most things are passed through unmodified, except for error checking.
-//
+//
TIntermTyped* HlslParseContext::handleLvalue(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
if (node == nullptr)
lhsAsAggregate = lhsAsBinary->getLeft()->getAsAggregate();
lhsIsSwizzle = true;
}
-
+
TIntermTyped* object = lhsAsAggregate->getSequence()[0]->getAsTyped();
TIntermTyped* coord = lhsAsAggregate->getSequence()[1]->getAsTyped();
TIntermSymbol* rhsTmp = rhs->getAsSymbolNode();
TIntermTyped* coordTmp = coord;
-
+
if (rhsTmp == nullptr || isModifyOp || lhsIsSwizzle) {
rhsTmp = addTmpVar("storeTemp", objDerefType);
// rhsTmp op= rhs.
makeBinary(assignOp, addSwizzle(intermediate.addSymbol(*rhsTmp), lhsAsBinary), rhs);
}
-
+
makeStore(object, coordTmp, rhsTmp); // add a store
return finishSequence(rhsTmp, objDerefType); // return rhsTmp from sequence
}
// rhsTmp op
// OpImageStore(object, coordTmp, rhsTmp)
// rhsTmp
-
+
TIntermSymbol* rhsTmp = addTmpVar("storeTemp", objDerefType);
TIntermTyped* coordTmp = addTmpVar("coordTemp", coord->getType());
-
+
makeBinary(EOpAssign, coordTmp, coord); // coordtmp = load[param1]
makeLoad(rhsTmp, object, coordTmp, objDerefType); // rhsTmp = OpImageLoad(object, coordTmp)
makeUnary(assignOp, rhsTmp); // op rhsTmp
makeStore(object, coordTmp, rhsTmp2); // OpImageStore(object, coordTmp, rhsTmp2)
return finishSequence(rhsTmp1, objDerefType); // return rhsTmp from sequence
}
-
+
default:
break;
}
if (lhs)
if (lValueErrorCheck(loc, op, lhs))
return nullptr;
-
+
return node;
}
//
// methods can't be resolved until we later see the function-calling syntax.
- // Save away the name in the AST for now. Processing is completed in
+ // Save away the name in the AST for now. Processing is completed in
// handleLengthMethod(), etc.
//
if (field == "length") {
// We can ignore arrayness: it's uninvolved.
if (type.isStruct()) {
TTypeList* userStructure = type.getWritableStruct();
-
+
// Get iterator to (now at end) set of builtin iterstage IO members
const auto firstIo = std::stable_partition(userStructure->begin(), userStructure->end(),
[](const TTypeLoc& t) {return !t.type->isBuiltInInterstageIO();});
// Determine whether we should flatten an arbitrary type.
bool HlslParseContext::shouldFlatten(const TType& type) const
{
- return shouldFlattenIO(type) || shouldFlattenUniform(type);
+ return shouldFlattenIO(type) || shouldFlattenUniform(type);
}
// Is this an IO variable that can't be passed down the stack?
const TType& type = variable.getType();
// emplace gives back a pair whose .first is an iterator to the item...
- auto entry = flattenMap.emplace(variable.getUniqueId(),
+ auto entry = flattenMap.emplace(variable.getUniqueId(),
TFlattenData(type.getQualifier().layoutBinding));
// ... and the item is a map pair, so first->second is the TFlattenData itself.
flatten(loc, variable, type, entry.first->second, "");
}
-
+
// Recursively flatten the given variable at the provided type, building the flattenData as we go.
//
// This is mutually recursive with flattenStruct and flattenArray.
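A sketch of what flattening accomplishes (hypothetical HLSL input, shown in comments; the flattened names are illustrative only):

    // struct S { Texture2D tex; float4 color; };
    // S s;    // the opaque member keeps 's' from being handled as one object
    // ...flattening replaces 's' with one variable per final member,
    // e.g. an individual Texture2D for s.tex and a float4 for s.color.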
// Add a single flattened member to the flattened data being tracked for the composite
// Returns true for the final flattening level.
-int HlslParseContext::addFlattenedMember(const TSourceLoc& loc,
- const TVariable& variable, const TType& type, TFlattenData& flattenData,
+int HlslParseContext::addFlattenedMember(const TSourceLoc& loc,
+ const TVariable& variable, const TType& type, TFlattenData& flattenData,
const TString& memberName, bool track)
{
if (isFinalFlattening(type)) {
// effecting a transfer of this information to the flattened variable form.
//
// Assumes shouldFlatten() or equivalent was called first.
-int HlslParseContext::flattenStruct(const TSourceLoc& loc, const TVariable& variable, const TType& type,
+int HlslParseContext::flattenStruct(const TSourceLoc& loc, const TVariable& variable, const TType& type,
TFlattenData& flattenData, TString name)
{
assert(type.isStruct());
// equivalent set of individual variables.
//
// Assumes shouldFlatten() or equivalent was called first.
-int HlslParseContext::flattenArray(const TSourceLoc& loc, const TVariable& variable, const TType& type,
+int HlslParseContext::flattenArray(const TSourceLoc& loc, const TVariable& variable, const TType& type,
TFlattenData& flattenData, TString name)
{
assert(type.isArray());
int pos = start;
flattenData.offsets.resize(int(pos + size), -1);
- for (int element=0; element < size; ++element) {
+ for (int element=0; element < size; ++element) {
char elementNumBuf[20]; // sufficient for MAXINT
snprintf(elementNumBuf, sizeof(elementNumBuf)-1, "[%d]", element);
const int mpos = addFlattenedMember(loc, variable, dereferencedType, flattenData,
wasSplit(node->getAsSymbolNode()->getId());
}
-
// Turn an access into an aggregate that was flattened to instead be
// an access to the individual variable the member was flattened to.
// Assumes shouldFlatten() or equivalent was called first.
builtInIoIndex = index;
}
-
// Turn an access into an struct that was split to instead be an
// access to either the modified structure, or a direct reference to
// one of the split member variables.
return function;
}
-
// Add interstage IO variables to the linkage in canonical order.
void HlslParseContext::addInterstageIoToLinkage()
{
}
//
-// Handle seeing the function prototype in front of a function definition in the grammar.
+// Handle seeing the function prototype in front of a function definition in the grammar.
// The body is handled after this function returns.
//
-TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function,
+TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function,
const TAttributeMap& attributes)
{
currentCaller = function.getMangledName();
const TIntermAggregate* numThreads = attributes[EatNumThreads];
if (numThreads != nullptr) {
const TIntermSequence& sequence = numThreads->getSequence();
-
+
for (int lid = 0; lid < int(sequence.size()); ++lid)
intermediate.setLocalSize(lid, sequence[lid]->getAsConstantUnion()->getConstArray()[0].getIConst());
}
// 1. 1 item to copy: Use the RHS directly.
// 2. >1 item, simple symbol RHS: we'll create a new TIntermSymbol node for each, but no assign to temp.
// 3. >1 item, complex RHS: assign it to a new temp variable, and create a TIntermSymbol for each member.
-
+
if (memberCount <= 1) {
// case 1: we'll use the symbol directly below. Nothing to do.
} else {
// subtree here IFF it does not itself contain any interstage built-in IO variables, so we only have to
// recurse into it if there's something for splitting to do. That can save a lot of AST verbosity for
// a bunch of memberwise copies.
- if (isFinalFlattening(typeL) || (!isFlattenLeft && !isFlattenRight &&
+ if (isFinalFlattening(typeL) || (!isFlattenLeft && !isFlattenRight &&
!typeL.containsBuiltInInterstageIO() && !typeR.containsBuiltInInterstageIO())) {
assignList = intermediate.growAggregate(assignList, intermediate.addAssign(op, subSplitLeft, subSplitRight, loc), loc);
} else {
case EOpInterlockedOr: return isImage ? EOpImageAtomicOr : EOpAtomicOr;
case EOpInterlockedXor: return isImage ? EOpImageAtomicXor : EOpAtomicXor;
case EOpInterlockedExchange: return isImage ? EOpImageAtomicExchange : EOpAtomicExchange;
- case EOpInterlockedCompareStore: // TODO: ...
+ case EOpInterlockedCompareStore: // TODO: ...
default:
error(loc, "unknown atomic operation", "unknown op", "");
return EOpNull;
tex->getSequence().push_back(arg0); // sampler
tex->getSequence().push_back(constructCoord); // coordinate
tex->getSequence().push_back(bias); // bias
-
+
node = clampReturn(tex, sampler);
break;
break;
}
-
+
case EOpMethodSampleGrad: // ...
{
TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
} else {
indexedOut = sizeQueryReturn;
}
-
+
TIntermTyped* outParam = argAggregate->getSequence()[outParamBase + compNum]->getAsTyped();
TIntermTyped* compAssign = intermediate.addAssign(EOpAssign, outParam, indexedOut, loc);
samplesQuery->getSequence().push_back(argTex);
samplesQuery->setType(TType(EbtUint, EvqTemporary, 1));
samplesQuery->setLoc(loc);
-
+
TIntermTyped* compAssign = intermediate.addAssign(EOpAssign, outParam, samplesQuery, loc);
compoundStatement = intermediate.growAggregate(compoundStatement, compAssign);
}
// optional offset value
if (argAggregate->getSequence().size() > 4)
argOffset = argAggregate->getSequence()[4]->getAsTyped();
-
+
const int coordDimWithCmpVal = argCoord->getType().getVectorSize() + 1; // +1 for cmp
// AST wants comparison value as one of the texture coordinates
TIntermTyped* argLod = argAggregate->getSequence()[3]->getAsTyped();
TIntermTyped* argOffset = nullptr;
const TSampler& sampler = argTex->getType().getSampler();
-
+
const int numArgs = (int)argAggregate->getSequence().size();
if (numArgs == 5) // offset, if present
argOffset = argAggregate->getSequence()[4]->getAsTyped();
-
+
const TOperator textureOp = (argOffset == nullptr ? EOpTextureLod : EOpTextureLodOffset);
TIntermAggregate* txsample = new TIntermAggregate(textureOp);
break;
}
-
+
case EOpMethodGatherRed: // fall through...
case EOpMethodGatherGreen: // ...
case EOpMethodGatherBlue: // ...
emit->setType(TType(EbtVoid));
sequence = intermediate.growAggregate(sequence,
- handleAssign(loc, EOpAssign,
+ handleAssign(loc, EOpAssign,
argAggregate->getSequence()[0]->getAsTyped(),
argAggregate->getSequence()[1]->getAsTyped()),
loc);
if (!decomposeHlslIntrinsics || !node || !node->getAsOperator())
return;
-
+
const TIntermAggregate* argAggregate = arguments ? arguments->getAsAggregate() : nullptr;
TIntermUnary* fnUnary = node->getAsUnaryNode();
const TOperator op = node->getAsOperator()->getOp();
arg0->getType().isVector()));
// calculate # of components for comparison const
- const int constComponentCount =
+ const int constComponentCount =
std::max(arg0->getType().getVectorSize(), 1) *
std::max(arg0->getType().getMatrixCols(), 1) *
std::max(arg0->getType().getMatrixRows(), 1);
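For example, under the computation above a hypothetical float3x3 argument yields 1 * 3 * 3 = 9 components for the comparison constant (its vector size is 0, clamped to 1 by the max).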
TIntermTyped* zero = intermediate.addConstantUnion(0, type0, loc, true);
compareNode = handleBinaryMath(loc, "clip", EOpLessThan, arg0, zero);
}
-
+
TIntermBranch* killNode = intermediate.addBranch(EOpKill, loc);
node = new TIntermSelection(compareNode, killNode, nullptr);
node->setLoc(loc);
-
+
break;
}
atomic->setType(arg0->getType());
atomic->getWritableType().getQualifier().makeTemporary();
atomic->setLoc(loc);
-
+
if (isImage) {
// orig_value = imageAtomicOp(image, loc, data)
imageAtomicParams(atomic, arg0);
atomic->getSequence().push_back(arg1);
atomic->getSequence().push_back(arg2);
node = intermediate.addAssign(EOpAssign, arg3, atomic, loc);
-
+
break;
}
intermediate.addConversion(EOpConstructFloat,
TType(EbtFloat, EvqTemporary, 2), iU),
recip16);
-
+
TIntermAggregate* interp = new TIntermAggregate(EOpInterpolateAtOffset);
interp->getSequence().push_back(arg0);
interp->getSequence().push_back(floatOffset);
TIntermTyped* n_dot_h_m = handleBinaryMath(loc, "mul", EOpMul, n_dot_h, m); // n_dot_h * m
dst->getSequence().push_back(intermediate.addSelection(compare, zero, n_dot_h_m, loc));
-
+
// One:
dst->getSequence().push_back(intermediate.addConstantUnion(1.0, EbtFloat, loc, true));
convert->setLoc(loc);
convert->setType(TType(EbtDouble, EvqTemporary));
node = convert;
-
+
break;
}
-
+
case EOpF16tof32:
case EOpF32tof16:
{
// for decompositions, since we want to operate on the function node, not the aggregate holding
// output conversions.
- const TIntermTyped* fnNode = result;
+ const TIntermTyped* fnNode = result;
decomposeIntrinsic(loc, result, arguments); // HLSL->AST intrinsic decompositions
decomposeSampleMethods(loc, result, arguments); // HLSL->AST sample method decompositions
}
// Finish processing object.length(). This started earlier in handleDotDereference(), where
-// the ".length" part was recognized and semantically checked, and finished here where the
+// the ".length" part was recognized and semantically checked, and finished here where the
// function syntax "()" is recognized.
//
// Return resulting tree node.
// object itself.
TVariable* internalAggregate = makeInternalVariable("aggShadow", *function[i].type);
internalAggregate->getWritableType().getQualifier().makeTemporary();
- TIntermSymbol* internalSymbolNode = new TIntermSymbol(internalAggregate->getUniqueId(),
+ TIntermSymbol* internalSymbolNode = new TIntermSymbol(internalAggregate->getUniqueId(),
internalAggregate->getName(),
internalAggregate->getType());
internalSymbolNode->setLoc(arg->getLoc());
    // Also, in DX10, if an SV value is present as the input of a stage but isn't appropriate for that
    // stage, it would just be ignored, as it is likely there as part of an output struct from one stage
    // to the next.
-
-
+
bool bParseDX9 = false;
if (bParseDX9) {
if (semanticUpperCase == "PSIZE")
else if( semanticUpperCase == "SV_COVERAGE")
qualifier.builtIn = EbvSampleMask;
- //TODO, these need to get refined to be more specific
+        // TODO: these need to get refined to be more specific
else if( semanticUpperCase == "SV_DEPTHGREATEREQUAL")
qualifier.builtIn = EbvFragDepthGreater;
else if( semanticUpperCase == "SV_DEPTHLESSEQUAL")
error(loc, "not allowed in nested scope", token, "");
}
-
bool HlslParseContext::builtInName(const TString& /*identifier*/)
{
return false;
qualifier.layoutComponent = value;
return;
} else if (id.compare(0, 4, "xfb_") == 0) {
- // "Any shader making any static use (after preprocessing) of any of these
- // *xfb_* qualifiers will cause the shader to be in a transform feedback
- // capturing mode and hence responsible for describing the transform feedback
+ // "Any shader making any static use (after preprocessing) of any of these
+ // *xfb_* qualifiers will cause the shader to be in a transform feedback
+ // capturing mode and hence responsible for describing the transform feedback
// setup."
intermediate.setXfbMode();
if (id == "xfb_buffer") {
qualifier.layoutXfbOffset = value;
return;
} else if (id == "xfb_stride") {
- // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
+ // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
if (value > 4 * resources.maxTransformFeedbackInterleavedComponents)
error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d", resources.maxTransformFeedbackInterleavedComponents);
// Merge any layout qualifier information from src into dst, leaving everything else in dst alone
//
// "More than one layout qualifier may appear in a single declaration.
-// Additionally, the same layout-qualifier-name can occur multiple times
-// within a layout qualifier or across multiple layout qualifiers in the
-// same declaration. When the same layout-qualifier-name occurs
-// multiple times, in a single declaration, the last occurrence overrides
-// the former occurrence(s). Further, if such a layout-qualifier-name
-// will effect subsequent declarations or other observable behavior, it
-// is only the last occurrence that will have any effect, behaving as if
-// the earlier occurrence(s) within the declaration are not present.
-// This is also true for overriding layout-qualifier-names, where one
-// overrides the other (e.g., row_major vs. column_major); only the last
-// occurrence has any effect."
+// Additionally, the same layout-qualifier-name can occur multiple times
+// within a layout qualifier or across multiple layout qualifiers in the
+// same declaration. When the same layout-qualifier-name occurs
+// multiple times, in a single declaration, the last occurrence overrides
+// the former occurrence(s). Further, if such a layout-qualifier-name
+// will effect subsequent declarations or other observable behavior, it
+// is only the last occurrence that will have any effect, behaving as if
+// the earlier occurrence(s) within the declaration are not present.
+// This is also true for overriding layout-qualifier-names, where one
+// overrides the other (e.g., row_major vs. column_major); only the last
+// occurrence has any effect."
//
void HlslParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly)
{
// Look up a function name in the symbol table, and make sure it is a function.
//
// First, look for an exact match. If there is none, use the generic selector
-// TParseContextBase::selectFunction() to find one, parameterized by the
+// TParseContextBase::selectFunction() to find one, parameterized by the
// convertible() and better() predicates defined below.
//
// Return the function symbol if found, otherwise nullptr.
// create list of candidates to send
TVector<const TFunction*> candidateList;
symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
-
+
// These builtin ops can accept any type, so we bypass the argument selection
if (candidateList.size() == 1 && builtIn &&
(candidateList[0]->getBuiltInOp() == EOpMethodAppend ||
return true;
// no aggregate conversions
- if (from.isArray() || to.isArray() ||
+ if (from.isArray() || to.isArray() ||
from.isStruct() || to.isStruct())
return false;
    // We do not promote the texture or image type for these opcodes. Normally that would not
// be an issue because it's a buffer, but we haven't decomposed the opcode yet, and at this
// stage it's merely e.g, a basic integer type.
- //
+ //
// Instead, we want to promote other arguments, but stay within the same family. In other
// words, InterlockedAdd(RWBuffer<int>, ...) will always use the int flavor, never the uint flavor,
// but it is allowed to promote its other arguments.
// for ambiguity reporting
bool tie = false;
-
+
// send to the generic selector
const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
//
// Do everything necessary to handle a typedef declaration, for a single symbol.
-//
+//
// 'parseType' is the type part of the declaration (to the left)
// 'arraySizes' is the arrayness tagged on the identifier (to the right)
//
error(memberLoc, "member cannot contradict block", "stream", "");
}
- // "This includes a block's inheritance of the
- // current global default buffer, a block member's inheritance of the block's
- // buffer, and the requirement that any *xfb_buffer* declared on a block
+ // "This includes a block's inheritance of the
+ // current global default buffer, a block member's inheritance of the block's
+ // buffer, and the requirement that any *xfb_buffer* declared on a block
// member must match the buffer inherited from the block."
if (memberQualifier.hasXfbBuffer()) {
if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer)
}
//
-// "For a block, this process applies to the entire block, or until the first member
-// is reached that has a location layout qualifier. When a block member is declared with a location
+// "For a block, this process applies to the entire block, or until the first member
+// is reached that has a location layout qualifier. When a block member is declared with a location
// qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level
-// declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
-// until the next member declared with a location qualifier. The values used for locations do not have to be
+// declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
+// until the next member declared with a location qualifier. The values used for locations do not have to be
// declared in increasing order."
void HlslParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation)
{
- // "If a block has no block-level location layout qualifier, it is required that either all or none of its members
+ // "If a block has no block-level location layout qualifier, it is required that either all or none of its members
// have a location layout qualifier, or a compile-time error results."
if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation)
error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", "");
void HlslParseContext::fixBlockXfbOffsets(TQualifier& qualifier, TTypeList& typeList)
{
- // "If a block is qualified with xfb_offset, all its
- // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
- // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
+ // "If a block is qualified with xfb_offset, all its
+ // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
+ // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
// offsets."
if (! qualifier.hasXfbBuffer() || ! qualifier.hasXfbOffset())
qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd;
}
-// Calculate and save the offset of each block member, using the recursively
+// Calculate and save the offset of each block member, using the recursively
// defined block offset rules and the user-provided offset and align.
//
-// Also, compute and save the total size of the block. For the block's size, arrayness
+// Also, compute and save the total size of the block. For the block's size, arrayness
// is not taken into account, as each element is backed by a separate buffer.
//
void HlslParseContext::fixBlockUniformOffsets(const TQualifier& qualifier, TTypeList& typeList)
subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor
: qualifier.layoutMatrix == ElmRowMajor);
if (memberQualifier.hasOffset()) {
- // "The specified offset must be a multiple
+ // "The specified offset must be a multiple
// of the base alignment of the type of the block member it qualifies, or a compile-time error results."
if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment))
error(memberLoc, "must be a multiple of the member's alignment", "offset", "");
- // "The offset qualifier forces the qualified member to start at or after the specified
- // integral-constant expression, which will be its byte offset from the beginning of the buffer.
- // "The actual offset of a member is computed as
+ // "The offset qualifier forces the qualified member to start at or after the specified
+ // integral-constant expression, which will be its byte offset from the beginning of the buffer.
+ // "The actual offset of a member is computed as
// follows: If offset was declared, start with that offset, otherwise start with the next available offset."
offset = std::max(offset, memberQualifier.layoutOffset);
}
- // "The actual alignment of a member will be the greater of the specified align alignment and the standard
+ // "The actual alignment of a member will be the greater of the specified align alignment and the standard
// (e.g., std140) base alignment for the member's type."
if (memberQualifier.hasAlign())
memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign);
// "If the resulting offset is not a multiple of the actual alignment,
- // increase it to the first offset that is a multiple of
+ // increase it to the first offset that is a multiple of
// the actual alignment."
RoundToPow2(offset, memberAlignment);
typeList[member].type->getQualifier().layoutOffset = offset;
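A worked example of the quoted rules (assuming std140-style values: a float aligns to 4, a vec4 to 16):

    // layout(offset = 20) float f;  // start at max(0, 20) = 20; f occupies [20, 24)
    // vec4 v;                       // next offset is 24; RoundToPow2(24, 16) -> 32
    //                               // v occupies [32, 48)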
TParseContextBase::finish();
}
-
} // end namespace glslang
// * note, that appropriately gives an error if redeclaring a block that
// was already used and hence already copied-up
//
- // - on seeing a layout declaration that sizes the array, fix everything in the
+ // - on seeing a layout declaration that sizes the array, fix everything in the
// resize-list, giving errors for mismatch
//
// - on seeing an array size declaration, give errors on mismatch between it and previous
//
//
-// Create strings that declare built-in definitions, add built-ins programmatically
+// Create strings that declare built-in definitions, add built-ins programmatically
// that cannot be expressed in the strings, and establish mappings between
// built-in functions and operators.
//
return true;
}
- const bool isGather =
- (name == "Gather" ||
+ const bool isGather =
+ (name == "Gather" ||
name == "GatherRed" ||
- name == "GatherGreen" ||
+ name == "GatherGreen" ||
name == "GatherBlue" ||
name == "GatherAlpha");
- const bool isGatherCmp =
+ const bool isGatherCmp =
(name == "GatherCmpRed" ||
name == "GatherCmpGreen" ||
name == "GatherCmpBlue" ||
} else if (*nthArgOrder == '<') { // input params
++nthArgOrder;
s.append("in ");
- }
+ }
return nthArgOrder;
}
return arg == nullptr || *arg == '\0' || *arg == ',';
}
-
// If this is a fixed vector size, such as V3, return the size. Else return 0.
-int FixedVecSize(const char* arg)
+int FixedVecSize(const char* arg)
{
while (!IsEndOfArg(arg)) {
if (isdigit(*arg))
return 0; // none found.
}
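Usage sketch: a digit anywhere before the end of the argument specifier is taken as the fixed size, so:

    assert(FixedVecSize("V3") == 3);   // fixed three-component vector
    assert(FixedVecSize("V")  == 0);   // scalable: no fixed size found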
-
// Create and return a type name. This is done in GLSL, not HLSL conventions, until such
// time as builtins are parsed using the HLSL parser.
//
char type = *argType;
if (isTranspose) { // Take transpose of matrix dimensions
- std::swap(dim0, dim1);
+ std::swap(dim0, dim1);
} else if (isTexture) {
if (type == 'F') // map base type to texture of that type.
type = 'T'; // e.g, int -> itexture, uint -> utexture, etc.
case 'S': s += "sampler"; break;
case 's': s += "SamplerComparisonState"; break;
case 'T': s += ((isBuffer && isImage) ? "RWBuffer" :
- isBuffer ? "Buffer" :
+ isBuffer ? "Buffer" :
isImage ? "RWTexture" : "Texture"); break;
case 'i': s += ((isBuffer && isImage) ? "RWBuffer" :
- isBuffer ? "Buffer" :
+ isBuffer ? "Buffer" :
isImage ? "RWTexture" : "Texture"); break;
case 'u': s += ((isBuffer && isImage) ? "RWBuffer" :
isBuffer ? "Buffer" :
case 'V':
s += ('0' + char(dim0));
break;
- case 'M':
+ case 'M':
s += ('0' + char(dim0));
s += 'x';
s += ('0' + char(dim1));
return true;
}
-
// return position of end of argument specifier
inline const char* FindEndOfArg(const char* arg)
{
if (fixedVecSize > 0) // handle fixed sized vectors
dim0Min = dim0Max = fixedVecSize;
}
-
+
} // end anonymous namespace
namespace glslang {
{
}
-
//
// Handle creation of mat*mat specially, since it doesn't fall conveniently out of
// the generic prototype creation code below.
s.append(");\n"); // close paren
-
// Create V*M
AppendTypeName(s, "V", "F", xCols, 1); // add return type
s.append(" "); // space between type and name
    // needed for future validation. For now, they are commented out, and set below
// to EShLangAll, to allow any intrinsic to be used in any shader, which is legal
// if it is not called.
- //
+ //
// static const EShLanguageMask EShLangPSCS = EShLanguageMask(EShLangFragmentMask | EShLangComputeMask);
// static const EShLanguageMask EShLangVSPSGS = EShLanguageMask(EShLangVertexMask | EShLangFragmentMask | EShLangGeometryMask);
// static const EShLanguageMask EShLangCS = EShLangComputeMask;
{ "GetSamplePosition", "V2", "F", "$&2,S", "FUI,I", EShLangVSPSGS },
- //
+ //
// UINT Width
// UINT MipLevel, UINT Width, UINT NumberOfLevels
{ "GetDimensions", /* 1D */ "-", "-", "%!~1,>S", "FUI,U", EShLangAll },
AppendTypeName(s, nthArgOrder, nthArgType, argDim0, dim1); // Add arguments
}
-
+
s.append(");\n"); // close paren and trailing semicolon
} // dim 1 loop
} // dim 0 loop
// skip over special characters
if (isTexture && isalpha(argOrder[1]))
++argOrder;
- if (isdigit(argOrder[1]))
+ if (isdigit(argOrder[1]))
++argOrder;
} // arg order loop
-
+
if (intrinsic.stage == EShLangAll) // common builtins are only added once.
break;
}
{
}
-
//
// Finish adding/processing context-independent built-in symbols.
// 1) Programmatically add symbols that could not be added by simple text strings above.
//
// Add context-dependent (resource-specific) built-ins not handled by the above. These
-// would be ones that need to be programmatically added because they cannot
+// would be ones that need to be programmatically added because they cannot
// be added by simple text strings. For these, also
// 1) Map built-in functions to operators, for those that will turn into an operation node
// instead of remaining a function call.
{
}
-
} // end namespace glslang
void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage);
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable);
-
+
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
private:
// preprocessor includes
#include "../glslang/MachineIndependent/preprocessor/PpContext.h"
#include "../glslang/MachineIndependent/preprocessor/PpTokens.h"
-
+
namespace {
struct str_eq
(*KeywordMap)["half"] = EHTokHalf;
(*KeywordMap)["float"] = EHTokFloat;
(*KeywordMap)["double"] = EHTokDouble;
- (*KeywordMap)["min16float"] = EHTokMin16float;
+ (*KeywordMap)["min16float"] = EHTokMin16float;
(*KeywordMap)["min10float"] = EHTokMin10float;
(*KeywordMap)["min16int"] = EHTokMin16int;
(*KeywordMap)["min12int"] = EHTokMin12int;
(*KeywordMap)["RWTexture3D"] = EHTokRWTexture3d;
(*KeywordMap)["RWBuffer"] = EHTokRWBuffer;
-
(*KeywordMap)["struct"] = EHTokStruct;
(*KeywordMap)["cbuffer"] = EHTokCBuffer;
(*KeywordMap)["tbuffer"] = EHTokTBuffer;
// TODO: get correct set here
ReservedSet = new std::unordered_set<const char*, str_hash, str_eq>;
-
+
ReservedSet->insert("auto");
ReservedSet->insert("catch");
ReservedSet->insert("char");
EHTokRWTexture2darray,
EHTokRWTexture3d,
EHTokRWBuffer,
-
// variable, user type, ...
EHTokIdentifier,