Commit 1231c2ec by Rémi Verschelde

glslang: Sync with upstream 4fc7a33 for Vulkan SDK 1.2.131

Fixes #36888.
parent 214bc9e5
......@@ -162,11 +162,21 @@ the GLES version Godot targets.
## glslang
- Upstream: https://github.com/KhronosGroup/glslang
- Version: rev.3226
- Version: git (4fc7a33910fb8e40b970d160e1b38ab3f67fe0f3, 2020)
- License: glslang
Important: File `glslang/glslang/Include/Common.h` has a
Godot-made change marked with `// -- GODOT --` comments.
The version should be kept in sync with that of the Vulkan SDK in use (see the `vulkan`
section). Check Vulkan-ValidationLayers at the matching SDK tag for the known-good
glslang commit: https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/scripts/known_good.json
Files extracted from upstream source:
- `glslang`, `OGLCompilersDLL`, `SPIRV`
- `LICENSE.txt`
- Unnecessary files like `CMakeLists.txt`, `revision.template` and
`updateGrammar` removed.
Patches in the `patches` directory should be re-applied after updates.
## jpeg-compressor
......
......@@ -34,5 +34,6 @@ static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shade
static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation";
#endif // #ifndef GLSLextEXT_H
......@@ -41,5 +41,8 @@ static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_stora
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer";
static const char* const E_SPV_KHR_physical_storage_buffer = "SPV_KHR_physical_storage_buffer";
static const char* const E_SPV_EXT_fragment_shader_interlock = "SPV_EXT_fragment_shader_interlock";
static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shader_clock";
#endif // #ifndef GLSLextKHR_H
......@@ -75,4 +75,7 @@ const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate";
//SPV_NV_cooperative_matrix
const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix";
//SPV_NV_shader_sm_builtins
const char* const E_SPV_NV_shader_sm_builtins = "SPV_NV_shader_sm_builtins";
#endif // #ifndef GLSLextNV_H
[Source diff not shown: file too large to display.]
......@@ -40,7 +40,7 @@
#endif
#include "SpvTools.h"
#include "../glslang/Include/intermediate.h"
#include "glslang/Include/intermediate.h"
#include <string>
#include <vector>
......
......@@ -61,17 +61,22 @@ namespace {
// Use by calling visit() on the root block.
class ReadableOrderTraverser {
public:
explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
ReadableOrderTraverser(std::function<void(Block*, spv::ReachReason, Block*)> callback)
: callback_(callback) {}
// Visits the block if it hasn't been visited already and isn't currently
// being delayed. Invokes callback(block), then descends into its
// being delayed. Invokes callback(block, why, header), then descends into its
// successors. Delays merge-block and continue-block processing until all
// the branches have been completed.
void visit(Block* block)
// the branches have been completed. If |block| is an unreachable merge block or
// an unreachable continue target, then |header| is the corresponding header block.
void visit(Block* block, spv::ReachReason why, Block* header)
{
assert(block);
if (why == spv::ReachViaControlFlow) {
reachableViaControlFlow_.insert(block);
}
if (visited_.count(block) || delayed_.count(block))
return;
callback_(block);
callback_(block, why, header);
visited_.insert(block);
Block* mergeBlock = nullptr;
Block* continueBlock = nullptr;
......@@ -87,27 +92,40 @@ public:
delayed_.insert(continueBlock);
}
}
const auto successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it);
if (why == spv::ReachViaControlFlow) {
const auto& successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it, why, nullptr);
}
if (continueBlock) {
const spv::ReachReason continueWhy =
(reachableViaControlFlow_.count(continueBlock) > 0)
? spv::ReachViaControlFlow
: spv::ReachDeadContinue;
delayed_.erase(continueBlock);
visit(continueBlock);
visit(continueBlock, continueWhy, block);
}
if (mergeBlock) {
const spv::ReachReason mergeWhy =
(reachableViaControlFlow_.count(mergeBlock) > 0)
? spv::ReachViaControlFlow
: spv::ReachDeadMerge;
delayed_.erase(mergeBlock);
visit(mergeBlock);
visit(mergeBlock, mergeWhy, block);
}
}
private:
std::function<void(Block*)> callback_;
std::function<void(Block*, spv::ReachReason, Block*)> callback_;
// Whether a block has already been visited or is being delayed.
std::unordered_set<Block *> visited_, delayed_;
// The set of blocks that actually are reached via control flow.
std::unordered_set<Block *> reachableViaControlFlow_;
};
}
void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
void spv::inReadableOrder(Block* root, std::function<void(Block*, spv::ReachReason, Block*)> callback)
{
ReadableOrderTraverser(callback).visit(root);
ReadableOrderTraverser(callback).visit(root, spv::ReachViaControlFlow, nullptr);
}
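A quick illustration of the new three-argument callback that `spv::inReadableOrder` now takes, mirroring the `postProcessCFG()` usage later in this commit. The helper name and the include path below are assumptions, not part of the diff:

```cpp
#include <unordered_set>
#include "spvIR.h" // declares spv::Block, spv::Function, spv::ReachReason, spv::inReadableOrder

// Hypothetical helper: gather the blocks of 'f' reached via real control flow.
// Dead merge/continue blocks are still visited, with 'header' pointing at the
// header block that owns them.
static std::unordered_set<spv::Block*> collectLiveBlocks(spv::Function* f)
{
    std::unordered_set<spv::Block*> live;
    spv::inReadableOrder(f->getEntryBlock(),
        [&live](spv::Block* b, spv::ReachReason why, spv::Block* /*header*/) {
            if (why == spv::ReachViaControlFlow)
                live.insert(b);
        });
    return live;
}
```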
......@@ -32,6 +32,8 @@
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef GLSLANG_WEB
#include "Logger.h"
#include <algorithm>
......@@ -66,3 +68,5 @@ std::string SpvBuildLogger::getAllMessages() const {
}
} // end spv namespace
#endif
\ No newline at end of file
......@@ -46,6 +46,14 @@ class SpvBuildLogger {
public:
SpvBuildLogger() {}
#ifdef GLSLANG_WEB
void tbdFunctionality(const std::string& f) { }
void missingFunctionality(const std::string& f) { }
void warning(const std::string& w) { }
void error(const std::string& e) { errors.push_back(e); }
std::string getAllMessages() { return ""; }
#else
// Registers a TBD functionality.
void tbdFunctionality(const std::string& f);
// Registers a missing functionality.
......@@ -59,6 +67,7 @@ public:
// Returns all messages accumulated in the order of:
// TBD functionalities, missing functionalities, warnings, errors.
std::string getAllMessages() const;
#endif
private:
SpvBuildLogger(const SpvBuildLogger&);
......
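For reference, a minimal sketch of how `spv::SpvBuildLogger` is driven; under `GLSLANG_WEB` the stubs above keep the same call sites compiling while only errors are recorded. The snippet is illustrative and not taken from the commit:

```cpp
#include <iostream>
#include "Logger.h" // spv::SpvBuildLogger

int main()
{
    spv::SpvBuildLogger logger;
    logger.warning("example warning");   // no-op stub when GLSLANG_WEB is defined
    logger.error("example error");       // errors are collected in both builds
    std::cout << logger.getAllMessages() << std::endl; // returns "" in the web build
    return 0;
}
```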
......@@ -195,7 +195,7 @@ private:
// Header access & set methods
spirword_t magic() const { return spv[0]; } // return magic number
spirword_t bound() const { return spv[3]; } // return Id bound from header
spirword_t bound(spirword_t b) { return spv[3] = b; };
spirword_t bound(spirword_t b) { return spv[3] = b; }
spirword_t genmagic() const { return spv[2]; } // generator magic
spirword_t genmagic(spirword_t m) { return spv[2] = m; }
spirword_t schemaNum() const { return spv[4]; } // schema number from header
......
......@@ -46,7 +46,9 @@
#include "SpvBuilder.h"
#ifndef GLSLANG_WEB
#include "hex_float.h"
#endif
#ifndef _WIN32
#include <cstdio>
......@@ -230,6 +232,11 @@ Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardP
Id Builder::makeIntegerType(int width, bool hasSign)
{
#ifdef GLSLANG_WEB
assert(width == 32);
width = 32;
#endif
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
......@@ -265,6 +272,11 @@ Id Builder::makeIntegerType(int width, bool hasSign)
Id Builder::makeFloatType(int width)
{
#ifdef GLSLANG_WEB
assert(width == 32);
width = 32;
#endif
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
......@@ -516,6 +528,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
#ifndef GLSLANG_WEB
// deal with capabilities
switch (dim) {
case DimBuffer:
......@@ -561,6 +574,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
addCapability(CapabilityImageMSArray);
}
}
#endif
return type->getResultId();
}
......@@ -586,7 +600,7 @@ Id Builder::makeSampledImageType(Id imageType)
return type->getResultId();
}
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
Id Builder::makeAccelerationStructureNVType()
{
Instruction *type;
......@@ -602,6 +616,7 @@ Id Builder::makeAccelerationStructureNVType()
return type->getResultId();
}
#endif
Id Builder::getDerefTypeId(Id resultId) const
{
Id typeId = getTypeId(resultId);
......@@ -939,6 +954,10 @@ Id Builder::makeFloatConstant(float f, bool specConstant)
Id Builder::makeDoubleConstant(double d, bool specConstant)
{
#ifdef GLSLANG_WEB
assert(0);
return NoResult;
#else
Op opcode = specConstant ? OpSpecConstant : OpConstant;
Id typeId = makeFloatType(64);
union { double db; unsigned long long ull; } u;
......@@ -963,10 +982,15 @@ Id Builder::makeDoubleConstant(double d, bool specConstant)
module.mapInstruction(c);
return c->getResultId();
#endif
}
Id Builder::makeFloat16Constant(float f16, bool specConstant)
{
#ifdef GLSLANG_WEB
assert(0);
return NoResult;
#else
Op opcode = specConstant ? OpSpecConstant : OpConstant;
Id typeId = makeFloatType(16);
......@@ -991,25 +1015,33 @@ Id Builder::makeFloat16Constant(float f16, bool specConstant)
module.mapInstruction(c);
return c->getResultId();
#endif
}
Id Builder::makeFpConstant(Id type, double d, bool specConstant)
{
assert(isFloatType(type));
#ifdef GLSLANG_WEB
const int width = 32;
assert(width == getScalarTypeWidth(type));
#else
const int width = getScalarTypeWidth(type);
#endif
switch (getScalarTypeWidth(type)) {
case 16:
return makeFloat16Constant((float)d, specConstant);
case 32:
return makeFloatConstant((float)d, specConstant);
case 64:
return makeDoubleConstant(d, specConstant);
default:
break;
}
assert(isFloatType(type));
assert(false);
return NoResult;
switch (width) {
case 16:
return makeFloat16Constant((float)d, specConstant);
case 32:
return makeFloatConstant((float)d, specConstant);
case 64:
return makeDoubleConstant(d, specConstant);
default:
break;
}
assert(false);
return NoResult;
}
Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps)
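To make the width dispatch in `makeFpConstant` concrete, a small illustrative fragment follows. It assumes an existing `spv::Builder` instance named `builder` and is not tied to any call site in the commit:

```cpp
// 32-bit float constant via the generic helper. Under GLSLANG_WEB only this
// width is legal; otherwise 16/64-bit widths route through
// makeFloat16Constant / makeDoubleConstant as shown above.
spv::Id f32  = builder.makeFloatType(32);
spv::Id half = builder.makeFpConstant(f32, 0.5, /*specConstant*/ false);
```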
......@@ -1825,7 +1857,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
if (parameters.component != NoResult)
texArgs[numArgs++] = parameters.component;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
if (parameters.granularity != NoResult)
texArgs[numArgs++] = parameters.granularity;
if (parameters.coarse != NoResult)
......@@ -1872,6 +1904,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask);
texArgs[numArgs++] = parameters.offsets;
}
#ifndef GLSLANG_WEB
if (parameters.sample) {
mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask);
texArgs[numArgs++] = parameters.sample;
......@@ -1889,6 +1922,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
if (parameters.volatil) {
mask = mask | ImageOperandsVolatileTexelKHRMask;
}
#endif
mask = mask | signExtensionMask;
if (mask == ImageOperandsMaskNone)
--numArgs; // undo speculative reservation for the mask argument
......@@ -1904,10 +1938,9 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
opCode = OpImageSparseFetch;
else
opCode = OpImageFetch;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
} else if (parameters.granularity && parameters.coarse) {
opCode = OpImageSampleFootprintNV;
#endif
} else if (gather) {
if (parameters.Dref)
if (sparse)
......@@ -1919,6 +1952,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
opCode = OpImageSparseGather;
else
opCode = OpImageGather;
#endif
} else if (explicitLod) {
if (parameters.Dref) {
if (proj)
......@@ -2067,11 +2101,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
break;
}
case OpImageQueryLod:
#ifdef AMD_EXTENSIONS
resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2);
#else
resultType = makeVectorType(makeFloatType(32), 2);
#endif
break;
case OpImageQueryLevels:
case OpImageQuerySamples:
......@@ -2089,6 +2119,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
if (parameters.lod)
query->addIdOperand(parameters.lod);
buildPoint->addInstruction(std::unique_ptr<Instruction>(query));
addCapability(CapabilityImageQuery);
return query->getResultId();
}
......@@ -2282,7 +2313,12 @@ Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>&
int numRows = getTypeNumRows(resultTypeId);
Instruction* instr = module.getInstruction(componentTypeId);
unsigned bitCount = instr->getImmediateOperand(0);
#ifdef GLSLANG_WEB
const unsigned bitCount = 32;
assert(bitCount == instr->getImmediateOperand(0));
#else
const unsigned bitCount = instr->getImmediateOperand(0);
#endif
// Optimize matrix constructed from a bigger matrix
if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) {
......
......@@ -67,6 +67,7 @@ typedef enum {
Spv_1_2 = (1 << 16) | (2 << 8),
Spv_1_3 = (1 << 16) | (3 << 8),
Spv_1_4 = (1 << 16) | (4 << 8),
Spv_1_5 = (1 << 16) | (5 << 8),
} SpvVersion;
class Builder {
......@@ -105,6 +106,20 @@ public:
void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
void setEmitOpLines() { emitOpLines = true; }
void addExtension(const char* ext) { extensions.insert(ext); }
void removeExtension(const char* ext)
{
extensions.erase(ext);
}
void addIncorporatedExtension(const char* ext, SpvVersion incorporatedVersion)
{
if (getSpvVersion() < static_cast<unsigned>(incorporatedVersion))
addExtension(ext);
}
void promoteIncorporatedExtension(const char* baseExt, const char* promoExt, SpvVersion incorporatedVersion)
{
removeExtension(baseExt);
addIncorporatedExtension(promoExt, incorporatedVersion);
}
void addInclude(const std::string& name, const std::string& text)
{
spv::Id incId = getStringId(name);
......@@ -201,7 +216,11 @@ public:
bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; }
bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; }
bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; }
#ifdef GLSLANG_WEB
bool isCooperativeMatrixType(Id typeId)const { return false; }
#else
bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; }
#endif
bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); }
bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; }
bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; }
......@@ -557,6 +576,14 @@ public:
// Accumulate whether anything in the chain of structures has coherent decorations.
struct CoherentFlags {
CoherentFlags() { clear(); }
#ifdef GLSLANG_WEB
void clear() { }
bool isVolatile() const { return false; }
CoherentFlags operator |=(const CoherentFlags &other) { return *this; }
#else
bool isVolatile() const { return volatil; }
unsigned coherent : 1;
unsigned devicecoherent : 1;
unsigned queuefamilycoherent : 1;
......@@ -577,7 +604,6 @@ public:
isImage = 0;
}
CoherentFlags() { clear(); }
CoherentFlags operator |=(const CoherentFlags &other) {
coherent |= other.coherent;
devicecoherent |= other.devicecoherent;
......@@ -589,6 +615,7 @@ public:
isImage |= other.isImage;
return *this;
}
#endif
};
CoherentFlags coherentFlags;
};
......@@ -656,16 +683,21 @@ public:
// based on the type of the base and the chain of dereferences.
Id accessChainGetInferredType();
// Add capabilities, extensions, remove unneeded decorations, etc.,
// Add capabilities, extensions, remove unneeded decorations, etc.,
// based on the resulting SPIR-V.
void postProcess();
// Prune unreachable blocks in the CFG and remove unneeded decorations.
void postProcessCFG();
#ifndef GLSLANG_WEB
// Add capabilities, extensions based on instructions in the module.
void postProcessFeatures();
// Hook to visit each instruction in a block in a function
void postProcess(Instruction&);
// Hook to visit each instruction in a reachable block in a function.
void postProcessReachable(const Instruction&);
// Hook to visit each non-32-bit sized float/int operation in a block.
void postProcessType(const Instruction&, spv::Id typeId);
#endif
void dump(std::vector<unsigned int>&) const;
......
......@@ -39,6 +39,7 @@
#include <cassert>
#include <cstdlib>
#include <unordered_map>
#include <unordered_set>
#include <algorithm>
......@@ -51,16 +52,13 @@ namespace spv {
#include "GLSL.std.450.h"
#include "GLSL.ext.KHR.h"
#include "GLSL.ext.EXT.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
namespace spv {
#ifndef GLSLANG_WEB
// Hook to visit each operand type and result type of an instruction.
// Will be called multiple times for one instruction, once for each typed
// operand and the result.
......@@ -160,7 +158,6 @@ void Builder::postProcessType(const Instruction& inst, Id typeId)
}
break;
case OpExtInst:
#if AMD_EXTENSIONS
switch (inst.getImmediateOperand(1)) {
case GLSLstd450Frexp:
case GLSLstd450FrexpStruct:
......@@ -176,7 +173,6 @@ void Builder::postProcessType(const Instruction& inst, Id typeId)
default:
break;
}
#endif
break;
default:
if (basicTypeOp == OpTypeFloat && width == 16)
......@@ -222,12 +218,10 @@ void Builder::postProcess(Instruction& inst)
addCapability(CapabilityImageQuery);
break;
#ifdef NV_EXTENSIONS
case OpGroupNonUniformPartitionNV:
addExtension(E_SPV_NV_shader_subgroup_partitioned);
addCapability(CapabilityGroupNonUniformPartitionedNV);
break;
#endif
case OpLoad:
case OpStore:
......@@ -326,17 +320,16 @@ void Builder::postProcess(Instruction& inst)
}
}
}
// Called for each instruction in a reachable block.
void Builder::postProcessReachable(const Instruction&)
{
// did have code here, but questionable to do so without deleting the instructions
}
#endif
// comment in header
void Builder::postProcess()
void Builder::postProcessCFG()
{
// reachableBlocks is the set of blocks reached via control flow, or which are
// an unreachable continue target or unreachable merge.
std::unordered_set<const Block*> reachableBlocks;
std::unordered_map<Block*, Block*> headerForUnreachableContinue;
std::unordered_set<Block*> unreachableMerges;
std::unordered_set<Id> unreachableDefinitions;
// Collect IDs defined in unreachable blocks. For each function, label the
// reachable blocks first. Then for each unreachable block, collect the
......@@ -344,16 +337,41 @@ void Builder::postProcess()
for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
Function* f = *fi;
Block* entry = f->getEntryBlock();
inReadableOrder(entry, [&reachableBlocks](const Block* b) { reachableBlocks.insert(b); });
inReadableOrder(entry,
[&reachableBlocks, &unreachableMerges, &headerForUnreachableContinue]
(Block* b, ReachReason why, Block* header) {
reachableBlocks.insert(b);
if (why == ReachDeadContinue) headerForUnreachableContinue[b] = header;
if (why == ReachDeadMerge) unreachableMerges.insert(b);
});
for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
Block* b = *bi;
if (reachableBlocks.count(b) == 0) {
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
if (unreachableMerges.count(b) != 0 || headerForUnreachableContinue.count(b) != 0) {
auto ii = b->getInstructions().cbegin();
++ii; // Keep potential decorations on the label.
for (; ii != b->getInstructions().cend(); ++ii)
unreachableDefinitions.insert(ii->get()->getResultId());
} else if (reachableBlocks.count(b) == 0) {
// The normal case for unreachable code. All definitions are considered dead.
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ++ii)
unreachableDefinitions.insert(ii->get()->getResultId());
}
}
}
// Modify unreachable merge blocks and unreachable continue targets.
// Delete their contents.
for (auto mergeIter = unreachableMerges.begin(); mergeIter != unreachableMerges.end(); ++mergeIter) {
(*mergeIter)->rewriteAsCanonicalUnreachableMerge();
}
for (auto continueIter = headerForUnreachableContinue.begin();
continueIter != headerForUnreachableContinue.end();
++continueIter) {
Block* continue_target = continueIter->first;
Block* header = continueIter->second;
continue_target->rewriteAsCanonicalUnreachableContinue(header);
}
// Remove unneeded decorations, for unreachable instructions
decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
[&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool {
......@@ -361,7 +379,11 @@ void Builder::postProcess()
return unreachableDefinitions.count(decoration_id) != 0;
}),
decorations.end());
}
#ifndef GLSLANG_WEB
// comment in header
void Builder::postProcessFeatures() {
// Add per-instruction capabilities, extensions, etc.,
// Look for any 8/16 bit type in physical storage buffer class, and set the
......@@ -371,24 +393,17 @@ void Builder::postProcess()
Instruction* type = groupedTypes[OpTypePointer][t];
if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) {
if (containsType(type->getIdOperand(1), OpTypeInt, 8)) {
addExtension(spv::E_SPV_KHR_8bit_storage);
addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5);
addCapability(spv::CapabilityStorageBuffer8BitAccess);
}
if (containsType(type->getIdOperand(1), OpTypeInt, 16) ||
containsType(type->getIdOperand(1), OpTypeFloat, 16)) {
addExtension(spv::E_SPV_KHR_16bit_storage);
addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3);
addCapability(spv::CapabilityStorageBuffer16BitAccess);
}
}
}
// process all reachable instructions...
for (auto bi = reachableBlocks.cbegin(); bi != reachableBlocks.cend(); ++bi) {
const Block* block = *bi;
const auto function = [this](const std::unique_ptr<Instruction>& inst) { postProcessReachable(*inst.get()); };
std::for_each(block->getInstructions().begin(), block->getInstructions().end(), function);
}
// process all block-contained instructions
for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
Function* f = *fi;
......@@ -422,5 +437,14 @@ void Builder::postProcess()
}
}
}
#endif
// comment in header
void Builder::postProcess() {
postProcessCFG();
#ifndef GLSLANG_WEB
postProcessFeatures();
#endif
}
}; // end spv namespace
......@@ -67,6 +67,8 @@ spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLog
logger->missingFunctionality("Target version for SPIRV-Tools validator");
return spv_target_env::SPV_ENV_VULKAN_1_1;
}
case glslang::EShTargetVulkan_1_2:
return spv_target_env::SPV_ENV_VULKAN_1_2;
default:
break;
}
......@@ -103,7 +105,7 @@ void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& s
// Apply the SPIRV-Tools validator to generated SPIR-V.
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger* logger)
spv::SpvBuildLogger* logger, bool prelegalization)
{
// validate
spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger));
......@@ -111,6 +113,7 @@ void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<
spv_diagnostic diagnostic = nullptr;
spv_validator_options options = spvValidatorOptionsCreate();
spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets());
spvValidatorOptionsSetBeforeHlslLegalization(options, prelegalization);
spvValidateWithOptions(context, options, &binary, &diagnostic);
// report
......@@ -172,6 +175,7 @@ void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>
if (options->generateDebugInfo) {
optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass());
}
optimizer.RegisterPass(spvtools::CreateWrapOpKillPass());
optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
optimizer.RegisterPass(spvtools::CreateMergeReturnPass());
optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass());
......@@ -195,8 +199,6 @@ void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>
optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
if (options->optimizeSize) {
optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass());
// TODO(greg-lunarg): Add this when AMD driver issues are resolved
// optimizer.RegisterPass(CreateCommonUniformElimPass());
}
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
optimizer.RegisterPass(spvtools::CreateCFGCleanupPass());
......
......@@ -41,10 +41,12 @@
#ifndef GLSLANG_SPV_TOOLS_H
#define GLSLANG_SPV_TOOLS_H
#ifdef ENABLE_OPT
#include <vector>
#include <ostream>
#endif
#include "../glslang/MachineIndependent/localintermediate.h"
#include "glslang/MachineIndependent/localintermediate.h"
#include "Logger.h"
namespace glslang {
......@@ -59,14 +61,14 @@ struct SpvOptions {
bool validate;
};
#if ENABLE_OPT
#ifdef ENABLE_OPT
// Use the SPIRV-Tools disassembler to print SPIR-V.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
// Apply the SPIRV-Tools validator to generated SPIR-V.
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*);
spv::SpvBuildLogger*, bool prelegalization);
// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
// legalizing HLSL SPIR-V.
......
......@@ -52,26 +52,16 @@ namespace spv {
extern "C" {
// Include C-based headers that don't have a namespace
#include "GLSL.std.450.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
const char* GlslStd450DebugNames[spv::GLSLstd450Count];
namespace spv {
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char*, unsigned);
#endif
static void Kill(std::ostream& out, const char* message)
{
......@@ -82,15 +72,8 @@ static void Kill(std::ostream& out, const char* message)
// used to identify the extended instruction library imported when printing
enum ExtInstSet {
GLSL450Inst,
#ifdef AMD_EXTENSIONS
GLSLextAMDInst,
#endif
#ifdef NV_EXTENSIONS
GLSLextNVInst,
#endif
OpenCLExtInst,
};
......@@ -499,37 +482,29 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
const char* name = idDescriptor[stream[word - 2]].c_str();
if (0 == memcmp("OpenCL", name, 6)) {
extInstSet = OpenCLExtInst;
#ifdef AMD_EXTENSIONS
} else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
extInstSet = GLSLextAMDInst;
#endif
#ifdef NV_EXTENSIONS
}else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
} else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) {
extInstSet = GLSLextNVInst;
#endif
}
unsigned entrypoint = stream[word - 1];
if (extInstSet == GLSL450Inst) {
if (entrypoint < GLSLstd450Count) {
out << "(" << GlslStd450DebugNames[entrypoint] << ")";
}
#ifdef AMD_EXTENSIONS
} else if (extInstSet == GLSLextAMDInst) {
out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
#endif
#ifdef NV_EXTENSIONS
}
else if (extInstSet == GLSLextNVInst) {
out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
#endif
}
}
break;
......@@ -648,9 +623,11 @@ static void GLSLstd450GetDebugNames(const char** names)
names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid";
names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample";
names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
names[GLSLstd450NMin] = "NMin";
names[GLSLstd450NMax] = "NMax";
names[GLSLstd450NClamp] = "NClamp";
}
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
......@@ -692,18 +669,17 @@ static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint
return "Bad";
}
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
strcmp(name, spv::E_SPV_NV_mesh_shader) == 0) {
strcmp(name, spv::E_SPV_NVX_multiview_per_view_attributes) == 0 ||
strcmp(name, spv::E_SPV_NV_fragment_shader_barycentric) == 0 ||
strcmp(name, spv::E_SPV_NV_mesh_shader) == 0 ||
strcmp(name, spv::E_SPV_NV_shader_image_footprint) == 0) {
switch (entrypoint) {
// NV builtins
case BuiltInViewportMaskNV: return "ViewportMaskNV";
......@@ -729,6 +705,8 @@ static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
case CapabilityMeshShadingNV: return "MeshShadingNV";
case CapabilityImageFootprintNV: return "ImageFootprintNV";
case CapabilitySampleMaskOverrideCoverageNV:return "SampleMaskOverrideCoverageNV";
// NV Decorations
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
......@@ -745,7 +723,6 @@ static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
}
return "Bad";
}
#endif
void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
{
......
......@@ -226,6 +226,36 @@ public:
return nullptr;
}
// Change this block into a canonical dead merge block. Delete instructions
// as necessary. A canonical dead merge block has only an OpLabel and an
// OpUnreachable.
void rewriteAsCanonicalUnreachableMerge() {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
Instruction* unreachable = new Instruction(OpUnreachable);
addInstruction(std::unique_ptr<Instruction>(unreachable));
}
// Change this block into a canonical dead continue target branching to the
// given header ID. Delete instructions as necessary. A canonical dead continue
// target has only an OpLabel and an unconditional branch back to the corresponding
// header.
void rewriteAsCanonicalUnreachableContinue(Block* header) {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
// Add OpBranch back to the header.
assert(header != nullptr);
Instruction* branch = new Instruction(OpBranch);
branch->addIdOperand(header->getId());
addInstruction(std::unique_ptr<Instruction>(branch));
successors.push_back(header);
}
bool isTerminated() const
{
switch (instructions.back()->getOpCode()) {
......@@ -235,6 +265,7 @@ public:
case OpKill:
case OpReturn:
case OpReturnValue:
case OpUnreachable:
return true;
default:
return false;
......@@ -268,10 +299,24 @@ protected:
bool unreachable;
};
// The different reasons for reaching a block in the inReadableOrder traversal.
enum ReachReason {
// Reachable from the entry block via transfers of control, i.e. branches.
ReachViaControlFlow = 0,
// A continue target that is not reachable via control flow.
ReachDeadContinue,
// A merge block that is not reachable via control flow.
ReachDeadMerge
};
// Traverses the control-flow graph rooted at root in an order suited for
// readable code generation. Invokes callback at every node in the traversal
// order.
void inReadableOrder(Block* root, std::function<void(Block*)> callback);
// order. The callback arguments are:
// - the block,
// - the reason we reached the block,
// - if the reason was that block is an unreachable continue or unreachable merge block
// then the last parameter is the corresponding header block.
void inReadableOrder(Block* root, std::function<void(Block*, ReachReason, Block* header)> callback);
//
// SPIR-V IR Function.
......@@ -321,7 +366,7 @@ public:
parameterInstructions[p]->dump(out);
// Blocks
inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
inReadableOrder(blocks[0], [&out](const Block* b, ReachReason, Block*) { b->dump(out); });
Instruction end(0, 0, OpFunctionEnd);
end.dump(out);
}
......@@ -436,6 +481,6 @@ __inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
parent.getParent().mapInstruction(raw_instruction);
}
}; // end spv namespace
} // end spv namespace
#endif // spvIR_H
......@@ -61,11 +61,7 @@ enum TBasicType {
EbtSampler,
EbtStruct,
EbtBlock,
#ifdef NV_EXTENSIONS
EbtAccStructNV,
#endif
EbtReference,
// HLSL types that live only temporarily.
......@@ -94,13 +90,11 @@ enum TStorageQualifier {
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
#ifdef NV_EXTENSIONS
EvqPayloadNV,
EvqPayloadInNV,
EvqHitAttrNV,
EvqCallableDataNV,
EvqCallableDataInNV,
#endif
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
......@@ -221,7 +215,6 @@ enum TBuiltInVariable {
EbvSampleMask,
EbvHelperInvocation,
#ifdef AMD_EXTENSIONS
EbvBaryCoordNoPersp,
EbvBaryCoordNoPerspCentroid,
EbvBaryCoordNoPerspSample,
......@@ -229,7 +222,6 @@ enum TBuiltInVariable {
EbvBaryCoordSmoothCentroid,
EbvBaryCoordSmoothSample,
EbvBaryCoordPullModel,
#endif
EbvViewIndex,
EbvDeviceIndex,
......@@ -237,7 +229,6 @@ enum TBuiltInVariable {
EbvFragSizeEXT,
EbvFragInvocationCountEXT,
#ifdef NV_EXTENSIONS
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
......@@ -246,7 +237,7 @@ enum TBuiltInVariable {
EbvFragFullyCoveredNV,
EbvFragmentSizeNV,
EbvInvocationsPerPixelNV,
// raytracing
// ray tracing
EbvLaunchIdNV,
EbvLaunchSizeNV,
EbvInstanceCustomIndexNV,
......@@ -261,8 +252,10 @@ enum TBuiltInVariable {
EbvObjectToWorldNV,
EbvWorldToObjectNV,
EbvIncomingRayFlagsNV,
// barycentrics
EbvBaryCoordNV,
EbvBaryCoordNoPerspNV,
// mesh shaders
EbvTaskCountNV,
EbvPrimitiveCountNV,
EbvPrimitiveIndicesNV,
......@@ -271,7 +264,12 @@ enum TBuiltInVariable {
EbvLayerPerViewNV,
EbvMeshViewCountNV,
EbvMeshViewIndicesNV,
#endif
// sm builtins
EbvWarpsPerSM,
EbvSMCount,
EbvWarpID,
EbvSMID,
// HLSL built-ins that live only temporarily, until they get remapped
// to one of the above.
......@@ -291,6 +289,19 @@ enum TBuiltInVariable {
EbvLast
};
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
#ifdef GLSLANG_WEB
__inline const char* GetStorageQualifierString(TStorageQualifier q) { return ""; }
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) { return ""; }
#else
// These will show up in error messages
__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
......@@ -317,13 +328,11 @@ __inline const char* GetStorageQualifierString(TStorageQualifier q)
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
#ifdef NV_EXTENSIONS
case EvqPayloadNV: return "rayPayloadNV"; break;
case EvqPayloadInNV: return "rayPayloadInNV"; break;
case EvqHitAttrNV: return "hitAttributeNV"; break;
case EvqCallableDataNV: return "callableDataNV"; break;
case EvqCallableDataInNV: return "callableDataInNV"; break;
#endif
default: return "unknown qualifier";
}
}
......@@ -338,6 +347,8 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvLocalInvocationId: return "LocalInvocationID";
case EbvGlobalInvocationId: return "GlobalInvocationID";
case EbvLocalInvocationIndex: return "LocalInvocationIndex";
case EbvNumSubgroups: return "NumSubgroups";
case EbvSubgroupID: return "SubgroupID";
case EbvSubGroupSize: return "SubGroupSize";
case EbvSubGroupInvocation: return "SubGroupInvocation";
case EbvSubGroupEqMask: return "SubGroupEqMask";
......@@ -345,6 +356,13 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvSubGroupGtMask: return "SubGroupGtMask";
case EbvSubGroupLeMask: return "SubGroupLeMask";
case EbvSubGroupLtMask: return "SubGroupLtMask";
case EbvSubgroupSize2: return "SubgroupSize";
case EbvSubgroupInvocation2: return "SubgroupInvocationID";
case EbvSubgroupEqMask2: return "SubgroupEqMask";
case EbvSubgroupGeMask2: return "SubgroupGeMask";
case EbvSubgroupGtMask2: return "SubgroupGtMask";
case EbvSubgroupLeMask2: return "SubgroupLeMask";
case EbvSubgroupLtMask2: return "SubgroupLtMask";
case EbvVertexId: return "VertexId";
case EbvInstanceId: return "InstanceId";
case EbvVertexIndex: return "VertexIndex";
......@@ -396,7 +414,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvSampleMask: return "SampleMaskIn";
case EbvHelperInvocation: return "HelperInvocation";
#ifdef AMD_EXTENSIONS
case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
......@@ -404,7 +421,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
case EbvBaryCoordPullModel: return "BaryCoordPullModel";
#endif
case EbvViewIndex: return "ViewIndex";
case EbvDeviceIndex: return "DeviceIndex";
......@@ -412,7 +428,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvFragSizeEXT: return "FragSizeEXT";
case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
#ifdef NV_EXTENSIONS
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
......@@ -438,6 +453,7 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvBaryCoordNV: return "BaryCoordNV";
case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
case EbvTaskCountNV: return "TaskCountNV";
case EbvPrimitiveCountNV: return "PrimitiveCountNV";
case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV";
......@@ -446,20 +462,16 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvLayerPerViewNV: return "LayerPerViewNV";
case EbvMeshViewCountNV: return "MeshViewCountNV";
case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
#endif
case EbvWarpsPerSM: return "WarpsPerSMNV";
case EbvSMCount: return "SMCountNV";
case EbvWarpID: return "WarpIDNV";
case EbvSMID: return "SMIDNV";
default: return "unknown built-in variable";
}
}
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
{
switch (p) {
......@@ -470,6 +482,7 @@ __inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
default: return "unknown precision qualifier";
}
}
#endif
__inline bool isTypeSignedInt(TBasicType type)
{
......@@ -514,7 +527,8 @@ __inline bool isTypeFloat(TBasicType type)
}
}
__inline int getTypeRank(TBasicType type) {
__inline int getTypeRank(TBasicType type)
{
int res = -1;
switch(type) {
case EbtInt8:
......
......@@ -51,7 +51,7 @@ std::string to_string(const T& val) {
#endif
// -- GODOT start --
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) /* || defined MINGW_HAS_SECURE_API*/
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) /* || defined MINGW_HAS_SECURE_API */
// -- GODOT end --
#include <basetsd.h>
#ifndef snprintf
......
......@@ -304,7 +304,6 @@ public:
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
void setAllocator(TPoolAllocator* a) { allocator = *a; }
TPoolAllocator& getAllocator() const { return allocator; }
protected:
......
// This header is generated by the make-revision script.
#define GLSLANG_PATCH_LEVEL 3226
#define GLSLANG_PATCH_LEVEL 3559
[Source diff not shown: file too large to display.]
......@@ -91,6 +91,8 @@ public:
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
protected:
void addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion);
void relateTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage, TSymbolTable&);
void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion);
void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile);
void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile);
......
......@@ -67,6 +67,8 @@ void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReaso
}
}
#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...)
{
......@@ -113,6 +115,8 @@ void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReaso
va_end(args);
}
#endif
//
// Both test and if necessary, spit out an error, to see if the node is really
// an l-value that can be operated on this way.
......@@ -149,15 +153,13 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EvqConst: message = "can't modify a const"; break;
case EvqConstReadOnly: message = "can't modify a const"; break;
case EvqUniform: message = "can't modify a uniform"; break;
#ifndef GLSLANG_WEB
case EvqBuffer:
if (node->getQualifier().readonly)
if (node->getQualifier().isReadOnly())
message = "can't modify a readonly buffer";
#ifdef NV_EXTENSIONS
if (node->getQualifier().layoutShaderRecordNV)
if (node->getQualifier().isShaderRecordNV())
message = "can't modify a shaderrecordnv qualified buffer";
#endif
break;
#ifdef NV_EXTENSIONS
case EvqHitAttrNV:
if (language != EShLangIntersectNV)
message = "cannot modify hitAttributeNV in this stage";
......@@ -172,13 +174,13 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EbtSampler:
message = "can't modify a sampler";
break;
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtVoid:
message = "can't modify void";
break;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtAccStructNV:
message = "can't modify accelerationStructureNV";
break;
......@@ -234,7 +236,7 @@ void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op,
}
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode && symNode->getQualifier().writeonly)
if (symNode && symNode->getQualifier().isWriteOnly())
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
}
......@@ -254,11 +256,17 @@ void TParseContextBase::trackLinkage(TSymbol& symbol)
// Give an error if not.
void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
{
const auto sizeIsSpecializationExpression = [&type]() {
return type.containsSpecializationSize() &&
type.getArraySizes()->getOuterNode() != nullptr &&
type.getArraySizes()->getOuterNode()->getAsSymbolNode() == nullptr; };
if (index < 0) {
error(loc, "", "[", "index out of range '%d'", index);
index = 0;
} else if (type.isArray()) {
if (type.isSizedArray() && index >= type.getOuterArraySize()) {
if (type.isSizedArray() && !sizeIsSpecializationExpression() &&
index >= type.getOuterArraySize()) {
error(loc, "", "[", "array index out of range '%d'", index);
index = type.getOuterArraySize() - 1;
}
......@@ -568,6 +576,7 @@ void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TStrin
selector.push_back(0);
}
#ifdef ENABLE_HLSL
//
// Make the passed-in variable information become a member of the
// global uniform block. If this doesn't exist yet, make it.
......@@ -612,6 +621,7 @@ void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& mem
++firstNewMember;
}
#endif
void TParseContextBase::finish()
{
......
[Source diff not shown: file too large to display.]
......@@ -85,6 +85,7 @@ public:
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
postEntryPointReturn(false),
contextPragma(true, false),
beginInvocationInterlockCount(0), endInvocationInterlockCount(0),
parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr),
limits(resources.limits),
globalUniformBlock(nullptr),
......@@ -96,6 +97,7 @@ public:
}
virtual ~TParseContextBase() { }
#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
......@@ -104,6 +106,7 @@ public:
const char* szExtraInfoFormat, ...);
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
#endif
virtual void setLimits(const TBuiltInResource&) = 0;
......@@ -149,8 +152,10 @@ public:
extensionCallback(line, extension, behavior);
}
#ifdef ENABLE_HLSL
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
#endif
// Potentially rename shader entry point function
void renameShaderFunction(TString*& name) const
......@@ -182,6 +187,8 @@ public:
// the statementNestingLevel the current switch statement is at, which must match the level of its case statements
TList<int> switchLevel;
struct TPragma contextPragma;
int beginInvocationInterlockCount;
int endInvocationInterlockCount;
protected:
TParseContextBase(TParseContextBase&);
......@@ -276,7 +283,7 @@ public:
const TString* entryPoint = nullptr);
virtual ~TParseContext();
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); };
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); }
void setPrecisionDefaults();
void setLimits(const TBuiltInResource&) override;
......@@ -294,10 +301,12 @@ public:
TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
#ifndef GLSLANG_WEB
void makeEditable(TSymbol*&) override;
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
#endif
bool isIoResizeArray(const TType&) const;
void fixIoArraySize(const TSourceLoc&, TType&);
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const;
......@@ -401,6 +410,7 @@ public:
TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
void inheritMemoryQualifiers(const TQualifier& from, TQualifier& to);
void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
......@@ -414,6 +424,7 @@ public:
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
#ifndef GLSLANG_WEB
TAttributeType attributeFromName(const TString& name) const;
TAttributes* makeAttributes(const TString& identifier) const;
TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const;
......@@ -422,11 +433,11 @@ public:
// Determine selection control from attributes
void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*);
void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
// Determine loop control from attributes
void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
#endif
void resizeMeshViewDimension(const TSourceLoc&, TType&);
void checkAndResizeMeshViewDim(const TSourceLoc&, TType&, bool isBlockMember);
protected:
void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type);
......@@ -438,7 +449,9 @@ protected:
bool isRuntimeLength(const TIntermTyped&) const;
TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
#ifndef GLSLANG_WEB
void finish() override;
#endif
public:
//
......@@ -464,10 +477,11 @@ protected:
TQualifier globalUniformDefaults;
TQualifier globalInputDefaults;
TQualifier globalOutputDefaults;
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
TString currentCaller; // name of last function body entered (not valid when at global scope)
TIdSetType inductiveLoopIds;
#ifndef GLSLANG_WEB
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
bool anyIndexLimits;
TIdSetType inductiveLoopIds;
TVector<TIntermTyped*> needsIndexLimitationChecking;
//
......@@ -503,6 +517,7 @@ protected:
// array-sizing declarations
//
TVector<TSymbol*> ioArraySymbolResizeList;
#endif
};
} // end namespace glslang
......
......@@ -61,63 +61,66 @@ void TType::buildMangledName(TString& mangledName) const
switch (basicType) {
case EbtFloat: mangledName += 'f'; break;
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt: mangledName += 'i'; break;
case EbtUint: mangledName += 'u'; break;
case EbtBool: mangledName += 'b'; break;
#ifndef GLSLANG_WEB
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt8: mangledName += "i8"; break;
case EbtUint8: mangledName += "u8"; break;
case EbtInt16: mangledName += "i16"; break;
case EbtUint16: mangledName += "u16"; break;
case EbtInt64: mangledName += "i64"; break;
case EbtUint64: mangledName += "u64"; break;
case EbtBool: mangledName += 'b'; break;
case EbtAtomicUint: mangledName += "au"; break;
#ifdef NV_EXTENSIONS
case EbtAccStructNV: mangledName += "asnv"; break;
#endif
case EbtSampler:
switch (sampler.type) {
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtFloat16: mangledName += "f16"; break;
#endif
case EbtInt: mangledName += "i"; break;
case EbtUint: mangledName += "u"; break;
default: break; // some compilers want this
}
if (sampler.image)
mangledName += "I"; // a normal image
else if (sampler.sampler)
if (sampler.isImageClass())
mangledName += "I"; // a normal image or subpass
else if (sampler.isPureSampler())
mangledName += "p"; // a "pure" sampler
else if (!sampler.combined)
else if (!sampler.isCombined())
mangledName += "t"; // a "pure" texture
else
mangledName += "s"; // traditional combined sampler
if (sampler.arrayed)
if (sampler.isArrayed())
mangledName += "A";
if (sampler.shadow)
if (sampler.isShadow())
mangledName += "S";
if (sampler.external)
if (sampler.isExternal())
mangledName += "E";
if (sampler.yuv)
if (sampler.isYuv())
mangledName += "Y";
switch (sampler.dim) {
case Esd1D: mangledName += "1"; break;
case Esd2D: mangledName += "2"; break;
case Esd3D: mangledName += "3"; break;
case EsdCube: mangledName += "C"; break;
#ifndef GLSLANG_WEB
case Esd1D: mangledName += "1"; break;
case EsdRect: mangledName += "R2"; break;
case EsdBuffer: mangledName += "B"; break;
case EsdSubpass: mangledName += "P"; break;
#endif
default: break; // some compilers want this
}
#ifdef ENABLE_HLSL
if (sampler.hasReturnStruct()) {
// Name mangle for sampler return struct uses struct table index.
mangledName += "-tx-struct";
char text[16]; // plenty enough space for the small integers.
snprintf(text, sizeof(text), "%d-", sampler.structReturnIndex);
snprintf(text, sizeof(text), "%d-", sampler.getStructReturnIndex());
mangledName += text;
} else {
switch (sampler.getVectorSize()) {
......@@ -127,8 +130,9 @@ void TType::buildMangledName(TString& mangledName) const
case 4: break; // default to prior name mangle behavior
}
}
#endif
if (sampler.ms)
if (sampler.isMultiSample())
mangledName += "M";
break;
case EbtStruct:
......@@ -172,6 +176,8 @@ void TType::buildMangledName(TString& mangledName) const
}
}
#ifndef GLSLANG_WEB
//
// Dump functions.
//
......@@ -184,7 +190,7 @@ void TSymbol::dumpExtensions(TInfoSink& infoSink) const
for (int i = 0; i < numExtensions; i++)
infoSink.debug << getExtensions()[i] << ",";
infoSink.debug << ">";
}
}
......@@ -229,7 +235,7 @@ void TFunction::dump(TInfoSink& infoSink, bool complete) const
infoSink.debug << "\n";
}
void TAnonMember::dump(TInfoSink& TInfoSink, bool complete) const
void TAnonMember::dump(TInfoSink& TInfoSink, bool) const
{
TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str()
<< "\n";
......@@ -250,6 +256,8 @@ void TSymbolTable::dump(TInfoSink& infoSink, bool complete) const
}
}
#endif
//
// Functions have buried pointers to delete.
//
......
......@@ -116,8 +116,11 @@ public:
}
virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); }
virtual const char** getExtensions() const { return extensions->data(); }
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const = 0;
void dumpExtensions(TInfoSink& infoSink) const;
#endif
virtual bool isReadOnly() const { return ! writable; }
virtual void makeReadOnly() { writable = false; }
......@@ -193,7 +196,9 @@ public:
}
virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); }
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
protected:
explicit TVariable(const TVariable&);
......@@ -314,7 +319,9 @@ public:
virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
virtual const TParameter& operator[](int i) const { return parameters[i]; }
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TFunction(const TFunction&);
......@@ -374,7 +381,9 @@ public:
virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); }
virtual int getAnonId() const { return anonId; }
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TAnonMember(const TAnonMember&);
......@@ -542,7 +551,9 @@ public:
void relateToOperator(const char* name, TOperator op);
void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
#ifndef GLSLANG_WEB
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
TSymbolTableLevel* clone() const;
void readOnly();
......@@ -843,7 +854,9 @@ public:
}
int getMaxSymbolId() { return uniqueId; }
#ifndef GLSLANG_WEB
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
void copyTable(const TSymbolTable& copyOf);
void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }
......
......@@ -124,8 +124,10 @@ const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader
const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample";
const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location";
const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
......@@ -134,6 +136,7 @@ const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_con
const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
const char* const E_GL_ARB_gpu_shader_fp64 = "GL_ARB_gpu_shader_fp64";
const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
......@@ -141,6 +144,10 @@ const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil
// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock";
const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock";
const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object";
const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading";
const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
......@@ -172,6 +179,9 @@ const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_blo
const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2";
const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation";
const char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock";
// Arrays of extensions for the above viewportEXTs duplications
......@@ -189,7 +199,6 @@ const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multi
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
#ifdef AMD_EXTENSIONS
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
......@@ -200,9 +209,8 @@ const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_sh
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
#endif
#ifdef NV_EXTENSIONS
const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2";
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
......@@ -224,9 +232,10 @@ const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_sh
const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
#endif
const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins";
const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix";
// AEP
const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
......@@ -256,7 +265,7 @@ const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessel
const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
// KHX
// EXT
const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16";
......@@ -266,6 +275,11 @@ const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_s
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64";
const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8";
const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16";
const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64";
const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16";
// Arrays of extensions for the above AEP duplications
const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };
......
......@@ -34,6 +34,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "attribute.h"
#include "../Include/intermediate.h"
#include "ParseHelper.h"
......@@ -339,5 +341,6 @@ void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermN
}
}
} // end namespace glslang
#endif // GLSLANG_WEB
......@@ -76,7 +76,49 @@ namespace glslang {
EatMaxIterations,
EatIterationMultiple,
EatPeelCount,
EatPartialCount
EatPartialCount,
EatFormatRgba32f,
EatFormatRgba16f,
EatFormatR32f,
EatFormatRgba8,
EatFormatRgba8Snorm,
EatFormatRg32f,
EatFormatRg16f,
EatFormatR11fG11fB10f,
EatFormatR16f,
EatFormatRgba16,
EatFormatRgb10A2,
EatFormatRg16,
EatFormatRg8,
EatFormatR16,
EatFormatR8,
EatFormatRgba16Snorm,
EatFormatRg16Snorm,
EatFormatRg8Snorm,
EatFormatR16Snorm,
EatFormatR8Snorm,
EatFormatRgba32i,
EatFormatRgba16i,
EatFormatRgba8i,
EatFormatR32i,
EatFormatRg32i,
EatFormatRg16i,
EatFormatRg8i,
EatFormatR16i,
EatFormatR8i,
EatFormatRgba32ui,
EatFormatRgba16ui,
EatFormatRgba8ui,
EatFormatR32ui,
EatFormatRgb10a2ui,
EatFormatRg32ui,
EatFormatRg16ui,
EatFormatRg8ui,
EatFormatR16ui,
EatFormatR8ui,
EatFormatUnknown,
EatNonWritable,
EatNonReadable
};
class TIntermAggregate;
......
......@@ -78,7 +78,6 @@
#define GL_DOUBLE_MAT4x2 0x8F4D
#define GL_DOUBLE_MAT4x3 0x8F4E
#ifdef AMD_EXTENSIONS
// Those constants are borrowed from extension NV_gpu_shader5
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
......@@ -94,7 +93,6 @@
#define GL_FLOAT16_MAT3x4_AMD 0x91CB
#define GL_FLOAT16_MAT4x2_AMD 0x91CC
#define GL_FLOAT16_MAT4x3_AMD 0x91CD
#endif
#define GL_SAMPLER_1D 0x8B5D
#define GL_SAMPLER_2D 0x8B5E
......@@ -117,7 +115,6 @@
#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
#ifdef AMD_EXTENSIONS
#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE
#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF
#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0
......@@ -149,7 +146,6 @@
#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA
#endif
#define GL_INT_SAMPLER_1D 0x8DC9
#define GL_INT_SAMPLER_2D 0x8DCA
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -187,12 +187,14 @@ bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
//
void TParseContext::constantIndexExpressionCheck(TIntermNode* index)
{
#ifndef GLSLANG_WEB
TIndexTraverser it(inductiveLoopIds);
index->traverse(&it);
if (it.bad)
error(it.badLoc, "Non-constant-index-expression", "limitations", "");
#endif
}
} // end namespace glslang
......@@ -545,7 +545,7 @@ int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, T
case MacroExpandStarted:
break;
case MacroExpandUndef:
if (! shortCircuit && parseContext.profile == EEsProfile) {
if (! shortCircuit && parseContext.isEsProfile()) {
const char* message = "undefined macro in expression not allowed in es profile";
if (parseContext.relaxedErrors())
parseContext.ppWarn(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
......@@ -722,6 +722,7 @@ int TPpContext::CPPline(TPpToken* ppToken)
parseContext.setCurrentLine(lineRes);
if (token != '\n') {
#ifndef GLSLANG_WEB
if (token == PpAtomConstString) {
parseContext.ppRequireExtensions(directiveLoc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based #line");
// We need to save a copy of the string instead of pointing
......@@ -731,7 +732,9 @@ int TPpContext::CPPline(TPpToken* ppToken)
parseContext.setCurrentSourceName(sourceName);
hasFile = true;
token = scanToken(ppToken);
} else {
} else
#endif
{
token = eval(token, MIN_PRECEDENCE, false, fileRes, fileErr, ppToken);
if (! fileErr) {
parseContext.setCurrentString(fileRes);
......@@ -792,10 +795,8 @@ int TPpContext::CPPpragma(TPpToken* ppToken)
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
......@@ -954,18 +955,20 @@ int TPpContext::readCPPline(TPpToken* ppToken)
case PpAtomIfndef:
token = CPPifdef(0, ppToken);
break;
case PpAtomLine:
token = CPPline(ppToken);
break;
#ifndef GLSLANG_WEB
case PpAtomInclude:
if(!parseContext.isReadingHLSL()) {
parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_include_directive, "#include");
}
token = CPPinclude(ppToken);
break;
case PpAtomLine:
token = CPPline(ppToken);
break;
case PpAtomPragma:
token = CPPpragma(ppToken);
break;
#endif
case PpAtomUndef:
token = CPPundef(ppToken);
break;
......
......@@ -142,6 +142,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
ch = getChar();
int firstDecimal = len;
#ifdef ENABLE_HLSL
// 1.#INF or -1.#INF
if (ch == '#' && (ifdepth > 0 || parseContext.intermediate.getSource() == EShSourceHlsl)) {
if ((len < 2) ||
......@@ -169,6 +170,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
}
}
}
#endif
// Consume leading-zero digits after the decimal point
while (ch == '0') {
......@@ -257,6 +259,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
// Suffix:
bool isDouble = false;
bool isFloat16 = false;
#ifndef GLSLANG_WEB
if (ch == 'l' || ch == 'L') {
if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
parseContext.doubleCheck(ppToken->loc, "double floating-point suffix");
......@@ -295,11 +298,15 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
saveName(ch);
isFloat16 = true;
}
} else if (ch == 'f' || ch == 'F') {
} else
#endif
if (ch == 'f' || ch == 'F') {
#ifndef GLSLANG_WEB
if (ifdepth == 0)
parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
if (ifdepth == 0 && !parseContext.relaxedErrors())
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix");
#endif
if (ifdepth == 0 && !hasDecimalOrExponent)
parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
saveName(ch);
......@@ -468,9 +475,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]);
static const char* const Int16_Extensions[] = {
#ifdef AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16 };
static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]);
......@@ -579,6 +584,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
......@@ -587,7 +593,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
......@@ -596,12 +601,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
......@@ -689,6 +692,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
......@@ -697,7 +701,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
......@@ -706,12 +709,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
......@@ -780,6 +781,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
......@@ -788,7 +790,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
......@@ -797,12 +798,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
......
......@@ -116,6 +116,7 @@ int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken
int atom = stream[currentPos++].get(*ppToken);
ppToken->loc = parseContext.getCurrentLoc();
#ifndef GLSLANG_WEB
// Check for ##, unless the current # is the last character
if (atom == '#') {
if (peekToken('#')) {
......@@ -125,6 +126,7 @@ int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken
atom = PpAtomPaste;
}
}
#endif
return atom;
}
......
......@@ -37,6 +37,8 @@
// propagate the 'noContraction' qualifier.
//
#ifndef GLSLANG_WEB
#include "propagateNoContraction.h"
#include <cstdlib>
......@@ -79,7 +81,7 @@ typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
// the node has 'noContraction' qualifier, otherwise false.
bool isPreciseObjectNode(glslang::TIntermTyped* node)
{
return node->getType().getQualifier().noContraction;
return node->getType().getQualifier().isNoContraction();
}
// Returns true if the opcode is a dereferencing one.
......@@ -864,3 +866,5 @@ void PropagateNoContraction(const glslang::TIntermediate& intermediate)
}
}
};
#endif // GLSLANG_WEB
\ No newline at end of file
......@@ -33,6 +33,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "../Include/Common.h"
#include "reflection.h"
#include "LiveTraverser.h"
......@@ -110,6 +112,10 @@ public:
TReflection::TMapIndexToReflection &ioItems =
input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
TReflection::TNameToIndex &ioMapper =
input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;
if (reflection.options & EShReflectionUnwrapIOBlocks) {
bool anonymous = IsAnonymous(name);
......@@ -127,12 +133,13 @@ public:
blowUpIOAggregate(input, baseName, type);
}
} else {
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[name.c_str()] = (int)ioItems.size();
TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
if (it == ioMapper.end()) {
// separate pipe i/o params from uniforms and blocks
// 'in' is only for input in the first stage, as 'out' is only for the last stage; check traverse in the call stack.
ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
ioItems.push_back(
TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
EShLanguageMask& stages = ioItems.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
} else {
......@@ -396,7 +403,7 @@ public:
topLevelArrayStride = variables.back().arrayStride;
}
if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->getBasicType() == EbtAtomicUint)
if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
reflection.atomicCounterUniformIndices.push_back(uniformIndex);
variables.back().topLevelArrayStride = topLevelArrayStride;
......@@ -554,15 +561,18 @@ public:
bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
if (strictArraySuffix && blockParent) {
const TTypeList& typeList = *base->getType().getStruct();
TType structDerefType(base->getType(), 0);
const TType &structType = base->getType().isArray() ? structDerefType : base->getType();
const TTypeList& typeList = *structType.getStruct();
TVector<int> memberOffsets;
memberOffsets.resize(typeList.size());
getOffsets(base->getType(), memberOffsets);
getOffsets(structType, memberOffsets);
for (int i = 0; i < (int)typeList.size(); ++i) {
TType derefType(base->getType(), i);
TType derefType(structType, i);
TString name = baseName;
if (name.size() > 0)
name.append(".");
......@@ -573,7 +583,7 @@ public:
if (derefType.isArray() && derefType.isStruct()) {
name.append("[0]");
blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
blockIndex, 0, getArrayStride(base->getType(), derefType),
blockIndex, 0, getArrayStride(structType, derefType),
base->getQualifier().storage, false);
} else {
blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
......@@ -701,7 +711,6 @@ public:
case EsdBuffer:
return GL_SAMPLER_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
......@@ -730,7 +739,6 @@ public:
case EsdBuffer:
return GL_FLOAT16_SAMPLER_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
......@@ -793,7 +801,6 @@ public:
case EsdBuffer:
return GL_IMAGE_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
......@@ -812,7 +819,6 @@ public:
case EsdBuffer:
return GL_FLOAT16_IMAGE_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
......@@ -878,9 +884,7 @@ public:
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT_VEC2 + offset;
case EbtDouble: return GL_DOUBLE_VEC2 + offset;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
#endif
case EbtInt: return GL_INT_VEC2 + offset;
case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
case EbtInt64: return GL_INT64_ARB + offset;
......@@ -940,7 +944,6 @@ public:
default: return 0;
}
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch (type.getMatrixCols()) {
case 2:
......@@ -965,7 +968,6 @@ public:
default: return 0;
}
}
#endif
default:
return 0;
}
......@@ -974,9 +976,7 @@ public:
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT;
case EbtDouble: return GL_DOUBLE;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_NV;
#endif
case EbtInt: return GL_INT;
case EbtUint: return GL_UNSIGNED_INT;
case EbtInt64: return GL_INT64_ARB;
......@@ -1093,6 +1093,7 @@ void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediat
// build counter block index associations for buffers
void TReflection::buildCounterIndices(const TIntermediate& intermediate)
{
#ifdef ENABLE_HLSL
// search for ones that have counters
for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
......@@ -1101,6 +1102,7 @@ void TReflection::buildCounterIndices(const TIntermediate& intermediate)
if (index >= 0)
indexToUniformBlock[i].counterIndex = index;
}
#endif
}
// build Shader Stages mask for all uniforms
......@@ -1198,3 +1200,5 @@ void TReflection::dump()
}
} // end namespace glslang
#endif // GLSLANG_WEB
......@@ -33,6 +33,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#ifndef _REFLECTION_INCLUDED
#define _REFLECTION_INCLUDED
......@@ -150,6 +152,20 @@ public:
// see getIndex(const char*)
int getIndex(const TString& name) const { return getIndex(name.c_str()); }
// for mapping any name to its index (only pipe input/output names)
int getPipeIOIndex(const char* name, const bool inOrOut) const
{
TNameToIndex::const_iterator it = inOrOut ? pipeInNameToIndex.find(name) : pipeOutNameToIndex.find(name);
if (it == (inOrOut ? pipeInNameToIndex.end() : pipeOutNameToIndex.end()))
return -1;
else
return it->second;
}
// see getPipeIOIndex(const char*, const bool)
int getPipeIOIndex(const TString& name, const bool inOrOut) const { return getPipeIOIndex(name.c_str(), inOrOut); }
// Thread local size
unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
......@@ -187,6 +203,8 @@ protected:
TObjectReflection badReflection; // return for queries of -1 or generally out of range; has expected descriptions with in it for this
TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed
TNameToIndex pipeInNameToIndex; // maps pipe in names to indexes; this is a fix to separate pipe I/O from uniforms and buffers.
TNameToIndex pipeOutNameToIndex; // maps pipe out names to indexes; this is a fix to separate pipe I/O from uniforms and buffers.
TMapIndexToReflection indexToUniform;
TMapIndexToReflection indexToUniformBlock;
TMapIndexToReflection indexToBufferVariable;
......@@ -201,3 +219,5 @@ protected:
} // end namespace glslang
#endif // _REFLECTION_INCLUDED
#endif // GLSLANG_WEB
\ No newline at end of file
export default (() => {
const initialize = () => {
return new Promise(resolve => {
Module({
locateFile() {
const i = import.meta.url.lastIndexOf('/')
return import.meta.url.substring(0, i) + '/glslang.wasm';
},
onRuntimeInitialized() {
resolve({
compileGLSLZeroCopy: this.compileGLSLZeroCopy,
compileGLSL: this.compileGLSL,
});
},
});
});
};
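// Memoize the initialization promise so the Emscripten module is instantiated at most once.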
let instance;
return () => {
if (!instance) {
instance = initialize();
}
return instance;
};
})();
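// compileGLSLZeroCopy compiles GLSL to SPIR-V and returns { data, free }, where 'data' is a view into the WASM heap rather than a copy.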
Module['compileGLSLZeroCopy'] = function(glsl, shader_stage, gen_debug) {
gen_debug = !!gen_debug;
var shader_stage_int;
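// Stage numbers correspond to glslang's EShLanguage enum (vertex = 0, fragment = 4, compute = 5).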
if (shader_stage === 'vertex') {
shader_stage_int = 0;
} else if (shader_stage === 'fragment') {
shader_stage_int = 4;
} else if (shader_stage === 'compute') {
shader_stage_int = 5;
} else {
throw new Error("shader_stage must be 'vertex', 'fragment', or 'compute'");
}
var p_output = Module['_malloc'](4);
var p_output_len = Module['_malloc'](4);
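// Out-parameters written by the compiled C side: a pointer to the SPIR-V words and the word count.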
var id = ccall('convert_glsl_to_spirv',
'number',
['string', 'number', 'boolean', 'number', 'number'],
[glsl, shader_stage_int, gen_debug, p_output, p_output_len]);
var output = getValue(p_output, 'i32');
var output_len = getValue(p_output_len, 'i32');
Module['_free'](p_output);
Module['_free'](p_output_len);
if (id === 0) {
throw new Error('GLSL compilation failed');
}
var ret = {};
var outputIndexU32 = output / 4;
ret['data'] = Module['HEAPU32'].subarray(outputIndexU32, outputIndexU32 + output_len);
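// 'data' is a live view into HEAPU32, not a copy; it remains valid only until free() is called or the WASM heap grows.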
ret['free'] = function() {
Module['_destroy_output_buffer'](id);
};
return ret;
};
Module['compileGLSL'] = function(glsl, shader_stage, gen_debug) {
var compiled = Module['compileGLSLZeroCopy'](glsl, shader_stage, gen_debug);
var ret = compiled['data'].slice()
compiled['free']();
return ret;
};
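For reference, a minimal usage sketch of the wrapper above, assuming the built glslang.js and glslang.wasm files sit side by side and are loaded as an ES module; the import path and shader source are illustrative, not part of this commit:

// A minimal sketch, not part of the upstream sources.
import loadGlslang from './glslang.js';

const source = `#version 450
void main() { gl_Position = vec4(0.0); }`;

loadGlslang().then((glslang) => {
  // compileGLSL returns an independent Uint32Array copy of the SPIR-V words.
  const spirv = glslang.compileGLSL(source, 'vertex');
  console.log('SPIR-V word count:', spirv.length);

  // compileGLSLZeroCopy returns a heap view plus a free() callback; release it when done.
  const zeroCopy = glslang.compileGLSLZeroCopy(source, 'vertex', false);
  console.log('Magic number:', zeroCopy.data[0].toString(16)); // expected: 7230203
  zeroCopy.free();
});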
diff --git a/thirdparty/glslang/glslang/Include/Common.h b/thirdparty/glslang/glslang/Include/Common.h
index 733a790cfd..2c511bc1c5 100644
--- a/thirdparty/glslang/glslang/Include/Common.h
+++ b/thirdparty/glslang/glslang/Include/Common.h
@@ -50,7 +50,9 @@ std::string to_string(const T& val) {
}
#endif
-#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+// -- GODOT start --
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) /* || defined MINGW_HAS_SECURE_API */
+// -- GODOT end --
#include <basetsd.h>
#ifndef snprintf
#define snprintf sprintf_s