Diffstat (limited to 'third_party/rust/naga/src/front/glsl')
-rw-r--r--  third_party/rust/naga/src/front/glsl/ast.rs                    393
-rw-r--r--  third_party/rust/naga/src/front/glsl/builtins.rs              2497
-rw-r--r--  third_party/rust/naga/src/front/glsl/constants.rs              984
-rw-r--r--  third_party/rust/naga/src/front/glsl/context.rs               1609
-rw-r--r--  third_party/rust/naga/src/front/glsl/error.rs                  134
-rw-r--r--  third_party/rust/naga/src/front/glsl/functions.rs             1680
-rw-r--r--  third_party/rust/naga/src/front/glsl/lex.rs                    301
-rw-r--r--  third_party/rust/naga/src/front/glsl/mod.rs                    235
-rw-r--r--  third_party/rust/naga/src/front/glsl/offset.rs                 173
-rw-r--r--  third_party/rust/naga/src/front/glsl/parser.rs                 448
-rw-r--r--  third_party/rust/naga/src/front/glsl/parser/declarations.rs    671
-rw-r--r--  third_party/rust/naga/src/front/glsl/parser/expressions.rs     546
-rw-r--r--  third_party/rust/naga/src/front/glsl/parser/functions.rs       654
-rw-r--r--  third_party/rust/naga/src/front/glsl/parser/types.rs           434
-rw-r--r--  third_party/rust/naga/src/front/glsl/parser_tests.rs           832
-rw-r--r--  third_party/rust/naga/src/front/glsl/token.rs                  137
-rw-r--r--  third_party/rust/naga/src/front/glsl/types.rs                  335
-rw-r--r--  third_party/rust/naga/src/front/glsl/variables.rs              679
18 files changed, 12742 insertions, 0 deletions
diff --git a/third_party/rust/naga/src/front/glsl/ast.rs b/third_party/rust/naga/src/front/glsl/ast.rs
new file mode 100644
index 0000000000..cbb21f6de9
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/ast.rs
@@ -0,0 +1,393 @@
+use std::{borrow::Cow, fmt};
+
+use super::{builtins::MacroCall, context::ExprPos, Span};
+use crate::{
+ AddressSpace, BinaryOperator, Binding, Constant, Expression, Function, GlobalVariable, Handle,
+ Interpolation, Sampling, StorageAccess, Type, UnaryOperator,
+};
+
+#[derive(Debug, Clone, Copy)]
+pub enum GlobalLookupKind {
+ Variable(Handle<GlobalVariable>),
+ Constant(Handle<Constant>, Handle<Type>),
+ BlockSelect(Handle<GlobalVariable>, u32),
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct GlobalLookup {
+ pub kind: GlobalLookupKind,
+ pub entry_arg: Option<usize>,
+ pub mutable: bool,
+}
+
+#[derive(Debug, Clone)]
+pub struct ParameterInfo {
+ pub qualifier: ParameterQualifier,
+ /// Whether the parameter should be treated as a depth image instead of a
+ /// sampled image.
+ pub depth: bool,
+}
+
+/// How the function is implemented
+#[derive(Clone, Copy)]
+pub enum FunctionKind {
+ /// The function is user defined
+ Call(Handle<Function>),
+ /// The function is a builtin
+ Macro(MacroCall),
+}
+
+impl fmt::Debug for FunctionKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Self::Call(_) => write!(f, "Call"),
+ Self::Macro(_) => write!(f, "Macro"),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct Overload {
+    /// Normalized function parameters; modifiers are not applied
+ pub parameters: Vec<Handle<Type>>,
+ pub parameters_info: Vec<ParameterInfo>,
+ /// How the function is implemented
+ pub kind: FunctionKind,
+ /// Whether this function was already defined or is just a prototype
+ pub defined: bool,
+ /// Whether this overload is the one provided by the language or has
+ /// been redeclared by the user (builtins only)
+ pub internal: bool,
+ /// Whether or not this function returns void (nothing)
+ pub void: bool,
+}
+
+bitflags::bitflags! {
+    /// Tracks the variations of the builtin that have already been generated.
+    /// This is needed because some builtin overloads can't be generated unless
+    /// explicitly used, since they might cause unneeded capabilities to be requested
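+    ///
+    /// For example (illustrative): an overload taking `double` arguments is
+    /// only generated once the `DOUBLE` variation has been requested, so
+    /// shaders that never use doubles don't trigger the extra capability.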
+ #[derive(Default)]
+ pub struct BuiltinVariations: u32 {
+ /// Request the standard overloads
+ const STANDARD = 1 << 0;
+ /// Request overloads that use the double type
+ const DOUBLE = 1 << 1;
+ /// Request overloads that use samplerCubeArray(Shadow)
+ const CUBE_TEXTURES_ARRAY = 1 << 2;
+ /// Request overloads that use sampler2DMSArray
+ const D2_MULTI_TEXTURES_ARRAY = 1 << 3;
+ }
+}
+
+#[derive(Debug, Default)]
+pub struct FunctionDeclaration {
+ pub overloads: Vec<Overload>,
+ /// Tracks the builtin overload variations that were already generated
+ pub variations: BuiltinVariations,
+}
+
+#[derive(Debug)]
+pub struct EntryArg {
+ pub name: Option<String>,
+ pub binding: Binding,
+ pub handle: Handle<GlobalVariable>,
+ pub storage: StorageQualifier,
+}
+
+#[derive(Debug, Clone)]
+pub struct VariableReference {
+ pub expr: Handle<Expression>,
+    /// Whether the variable is of a pointer type (and needs loading) or not
+ pub load: bool,
+    /// Whether the value of the variable can be changed or not
+ pub mutable: bool,
+ pub constant: Option<(Handle<Constant>, Handle<Type>)>,
+ pub entry_arg: Option<usize>,
+}
+
+#[derive(Debug, Clone)]
+pub struct HirExpr {
+ pub kind: HirExprKind,
+ pub meta: Span,
+}
+
+#[derive(Debug, Clone)]
+pub enum HirExprKind {
+ Access {
+ base: Handle<HirExpr>,
+ index: Handle<HirExpr>,
+ },
+ Select {
+ base: Handle<HirExpr>,
+ field: String,
+ },
+ Constant(Handle<Constant>),
+ Binary {
+ left: Handle<HirExpr>,
+ op: BinaryOperator,
+ right: Handle<HirExpr>,
+ },
+ Unary {
+ op: UnaryOperator,
+ expr: Handle<HirExpr>,
+ },
+ Variable(VariableReference),
+ Call(FunctionCall),
+    /// Represents the ternary operator in glsl (`?:`)
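+    ///
+    /// For example (an illustrative sketch), `cond ? a : b` lowers to
+    /// `Conditional { condition: cond, accept: a, reject: b }`.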
+ Conditional {
+ /// The expression that will decide which branch to take, must evaluate to a boolean
+ condition: Handle<HirExpr>,
+ /// The expression that will be evaluated if [`condition`] returns `true`
+ ///
+ /// [`condition`]: Self::Conditional::condition
+ accept: Handle<HirExpr>,
+ /// The expression that will be evaluated if [`condition`] returns `false`
+ ///
+ /// [`condition`]: Self::Conditional::condition
+ reject: Handle<HirExpr>,
+ },
+ Assign {
+ tgt: Handle<HirExpr>,
+ value: Handle<HirExpr>,
+ },
+ /// A prefix/postfix operator like `++`
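+    ///
+    /// For example (illustrative): `i++` and `++i` both lower to an addition
+    /// on `i`, with `postfix` set to `true` and `false` respectively.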
+ PrePostfix {
+ /// The operation to be performed
+ op: BinaryOperator,
+ /// Whether this is a postfix or a prefix
+ postfix: bool,
+ /// The target expression
+ expr: Handle<HirExpr>,
+ },
+ /// A method call like `what.something(a, b, c)`
+ Method {
+ /// expression the method call applies to (`what` in the example)
+ expr: Handle<HirExpr>,
+ /// the method name (`something` in the example)
+ name: String,
+ /// the arguments to the method (`a`, `b`, and `c` in the example)
+ args: Vec<Handle<HirExpr>>,
+ },
+}
+
+#[derive(Debug, Hash, PartialEq, Eq)]
+pub enum QualifierKey<'a> {
+ String(Cow<'a, str>),
+ /// Used for `std140` and `std430` layout qualifiers
+ Layout,
+ /// Used for image formats
+ Format,
+}
+
+#[derive(Debug)]
+pub enum QualifierValue {
+ None,
+ Uint(u32),
+ Layout(StructLayout),
+ Format(crate::StorageFormat),
+}
+
+#[derive(Debug, Default)]
+pub struct TypeQualifiers<'a> {
+ pub span: Span,
+ pub storage: (StorageQualifier, Span),
+ pub invariant: Option<Span>,
+ pub interpolation: Option<(Interpolation, Span)>,
+ pub precision: Option<(Precision, Span)>,
+ pub sampling: Option<(Sampling, Span)>,
+ /// Memory qualifiers used in the declaration to set the storage access to be used
+ /// in declarations that support it (storage images and buffers)
+ pub storage_access: Option<(StorageAccess, Span)>,
+ pub layout_qualifiers: crate::FastHashMap<QualifierKey<'a>, (QualifierValue, Span)>,
+}
+
+impl<'a> TypeQualifiers<'a> {
+ /// Appends `errors` with errors for all unused qualifiers
+ pub fn unused_errors(&self, errors: &mut Vec<super::Error>) {
+ if let Some(meta) = self.invariant {
+ errors.push(super::Error {
+ kind: super::ErrorKind::SemanticError(
+ "Invariant qualifier can only be used in in/out variables".into(),
+ ),
+ meta,
+ });
+ }
+
+ if let Some((_, meta)) = self.interpolation {
+ errors.push(super::Error {
+ kind: super::ErrorKind::SemanticError(
+ "Interpolation qualifiers can only be used in in/out variables".into(),
+ ),
+ meta,
+ });
+ }
+
+ if let Some((_, meta)) = self.sampling {
+ errors.push(super::Error {
+ kind: super::ErrorKind::SemanticError(
+ "Sampling qualifiers can only be used in in/out variables".into(),
+ ),
+ meta,
+ });
+ }
+
+ if let Some((_, meta)) = self.storage_access {
+ errors.push(super::Error {
+ kind: super::ErrorKind::SemanticError(
+ "Memory qualifiers can only be used in storage variables".into(),
+ ),
+ meta,
+ });
+ }
+
+ for &(_, meta) in self.layout_qualifiers.values() {
+ errors.push(super::Error {
+ kind: super::ErrorKind::SemanticError("Unexpected qualifier".into()),
+ meta,
+ });
+ }
+ }
+
+    /// Removes the layout qualifier named `name`, if it exists, and adds an
+    /// error if its value isn't a [`QualifierValue::Uint`]
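+    ///
+    /// A hypothetical example: if the parser stored `layout(binding = 1)` as
+    /// `QualifierValue::Uint(1)` under the key `"binding"`, then
+    /// `uint_layout_qualifier("binding", errors)` returns `Some(1)` and
+    /// removes the entry.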
+ pub fn uint_layout_qualifier(
+ &mut self,
+ name: &'a str,
+ errors: &mut Vec<super::Error>,
+ ) -> Option<u32> {
+ match self
+ .layout_qualifiers
+ .remove(&QualifierKey::String(name.into()))
+ {
+ Some((QualifierValue::Uint(v), _)) => Some(v),
+ Some((_, meta)) => {
+ errors.push(super::Error {
+ kind: super::ErrorKind::SemanticError("Qualifier expects a uint value".into()),
+ meta,
+ });
+                // Return a dummy value instead of `None` to distinguish this
+                // from the qualifier not existing, since some callers require
+                // the qualifier to exist and a second error saying that it
+                // doesn't would be unhelpful
+ Some(0)
+ }
+ _ => None,
+ }
+ }
+
+    /// Removes the layout qualifier named `name`, if it exists, and adds an
+    /// error if its value isn't a [`QualifierValue::None`]
+ pub fn none_layout_qualifier(&mut self, name: &'a str, errors: &mut Vec<super::Error>) -> bool {
+ match self
+ .layout_qualifiers
+ .remove(&QualifierKey::String(name.into()))
+ {
+ Some((QualifierValue::None, _)) => true,
+ Some((_, meta)) => {
+ errors.push(super::Error {
+ kind: super::ErrorKind::SemanticError(
+ "Qualifier doesn't expect a value".into(),
+ ),
+ meta,
+ });
+                // Return `true` since the qualifier is defined; adding
+                // another error for it not being defined would be unhelpful
+ true
+ }
+ _ => false,
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub enum FunctionCallKind {
+ TypeConstructor(Handle<Type>),
+ Function(String),
+}
+
+#[derive(Debug, Clone)]
+pub struct FunctionCall {
+ pub kind: FunctionCallKind,
+ pub args: Vec<Handle<HirExpr>>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum StorageQualifier {
+ AddressSpace(AddressSpace),
+ Input,
+ Output,
+ Const,
+}
+
+impl Default for StorageQualifier {
+ fn default() -> Self {
+ StorageQualifier::AddressSpace(AddressSpace::Function)
+ }
+}
+
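+/// The memory layout rules used by a `uniform` or `buffer` block.
+///
+/// A minimal GLSL sketch (illustrative):
+/// ```glsl
+/// layout(std140) uniform Globals { vec4 color; };
+/// layout(std430) buffer Data { float values[]; };
+/// ```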
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum StructLayout {
+ Std140,
+ Std430,
+}
+
+// TODO: Encode precision hints in the IR
+/// A precision hint used in GLSL declarations.
+///
+/// Precision hints can be used to either speed up shader execution or control
+/// the precision of arithmetic operations.
+///
+/// To use a precision hint, simply add it before the type in the declaration.
+/// ```glsl
+/// mediump float a;
+/// ```
+///
+/// The default when no precision is declared is `highp`, which means that all
+/// operations operate with the type's defined width.
+///
+/// For `mediump` and `lowp`, operations follow the SPIR-V
+/// [`RelaxedPrecision`][RelaxedPrecision] decoration semantics.
+///
+/// [RelaxedPrecision]: https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html#_a_id_relaxedprecisionsection_a_relaxed_precision
+#[derive(Debug, Clone, PartialEq, Copy)]
+pub enum Precision {
+ /// `lowp` precision
+ Low,
+ /// `mediump` precision
+ Medium,
+ /// `highp` precision
+ High,
+}
+
+#[derive(Debug, Clone, PartialEq, Copy)]
+pub enum ParameterQualifier {
+ In,
+ Out,
+ InOut,
+ Const,
+}
+
+impl ParameterQualifier {
+    /// Returns true if the argument should be passed as an lhs expression
+ pub const fn is_lhs(&self) -> bool {
+ match *self {
+ ParameterQualifier::Out | ParameterQualifier::InOut => true,
+ _ => false,
+ }
+ }
+
+    /// Converts a parameter qualifier into an [`ExprPos`](ExprPos)
+ pub const fn as_pos(&self) -> ExprPos {
+ match *self {
+ ParameterQualifier::Out | ParameterQualifier::InOut => ExprPos::Lhs,
+ _ => ExprPos::Rhs,
+ }
+ }
+}
+
+/// The GLSL profile used by a shader.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Profile {
+ /// The `core` profile, default when no profile is specified.
+ Core,
+}
diff --git a/third_party/rust/naga/src/front/glsl/builtins.rs b/third_party/rust/naga/src/front/glsl/builtins.rs
new file mode 100644
index 0000000000..14e223d6a4
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/builtins.rs
@@ -0,0 +1,2497 @@
+use super::{
+ ast::{
+ BuiltinVariations, FunctionDeclaration, FunctionKind, Overload, ParameterInfo,
+ ParameterQualifier,
+ },
+ context::Context,
+ Error, ErrorKind, Frontend, Result,
+};
+use crate::{
+ BinaryOperator, Block, Constant, DerivativeAxis as Axis, DerivativeControl as Ctrl, Expression,
+ Handle, ImageClass, ImageDimension as Dim, ImageQuery, MathFunction, Module,
+ RelationalFunction, SampleLevel, ScalarKind as Sk, Span, Type, TypeInner, UnaryOperator,
+ VectorSize,
+};
+
+impl crate::ScalarKind {
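+    /// Returns a placeholder storage format with this scalar kind.
+    ///
+    /// The concrete format is presumably irrelevant: it's only used below to
+    /// build storage image types for builtin prototypes, where only the
+    /// scalar kind matters for overload matching.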
+ const fn dummy_storage_format(&self) -> crate::StorageFormat {
+ match *self {
+ Sk::Sint => crate::StorageFormat::R16Sint,
+ Sk::Uint => crate::StorageFormat::R16Uint,
+ _ => crate::StorageFormat::R16Float,
+ }
+ }
+}
+
+impl Module {
+    /// Helper function to create a function prototype for a builtin
+ fn add_builtin(&mut self, args: Vec<TypeInner>, builtin: MacroCall) -> Overload {
+ let mut parameters = Vec::with_capacity(args.len());
+ let mut parameters_info = Vec::with_capacity(args.len());
+
+ for arg in args {
+ parameters.push(self.types.insert(
+ Type {
+ name: None,
+ inner: arg,
+ },
+ Span::default(),
+ ));
+ parameters_info.push(ParameterInfo {
+ qualifier: ParameterQualifier::In,
+ depth: false,
+ });
+ }
+
+ Overload {
+ parameters,
+ parameters_info,
+ kind: FunctionKind::Macro(builtin),
+ defined: false,
+ internal: true,
+ void: false,
+ }
+ }
+}
+
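+/// Builds the `TypeInner` of a coordinate argument with the given number of
+/// components and scalar kind.
+///
+/// For example (illustrative), `make_coords_arg(3, Sk::Float)` yields a
+/// `vec3`-shaped type and `make_coords_arg(1, Sk::Sint)` a scalar `int`.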
+const fn make_coords_arg(number_of_components: usize, kind: Sk) -> TypeInner {
+ let width = 4;
+
+ match number_of_components {
+ 1 => TypeInner::Scalar { kind, width },
+ _ => TypeInner::Vector {
+ size: match number_of_components {
+ 2 => VectorSize::Bi,
+ 3 => VectorSize::Tri,
+ _ => VectorSize::Quad,
+ },
+ kind,
+ width,
+ },
+ }
+}
+
+/// Injects builtins into the declaration
+///
+/// This is done lazily to avoid a large startup cost and increased memory
+/// usage when the builtins aren't needed.
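+///
+/// A minimal usage sketch (hypothetical caller):
+/// ```ignore
+/// inject_builtin(&mut declaration, &mut module, "texture", BuiltinVariations::STANDARD);
+/// ```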
+pub fn inject_builtin(
+ declaration: &mut FunctionDeclaration,
+ module: &mut Module,
+ name: &str,
+ mut variations: BuiltinVariations,
+) {
+ log::trace!(
+ "{} variations: {:?} {:?}",
+ name,
+ variations,
+ declaration.variations
+ );
+    // Don't regenerate variations
+ variations.remove(declaration.variations);
+ declaration.variations |= variations;
+
+ if variations.contains(BuiltinVariations::STANDARD) {
+ inject_standard_builtins(declaration, module, name)
+ }
+
+ if variations.contains(BuiltinVariations::DOUBLE) {
+ inject_double_builtin(declaration, module, name)
+ }
+
+ let width = 4;
+ match name {
+ "texture"
+ | "textureGrad"
+ | "textureGradOffset"
+ | "textureLod"
+ | "textureLodOffset"
+ | "textureOffset"
+ | "textureProj"
+ | "textureProjGrad"
+ | "textureProjGradOffset"
+ | "textureProjLod"
+ | "textureProjLodOffset"
+ | "textureProjOffset" => {
+ let f = |kind, dim, arrayed, multi, shadow| {
+ for bits in 0..=0b11 {
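+                // bits layout
+                // bit 0 - variant coordinate form (see below)
+                // bit 1 - trailing bias argument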
+ let variant = bits & 0b1 != 0;
+ let bias = bits & 0b10 != 0;
+
+ let (proj, offset, level_type) = match name {
+ // texture(gsampler, gvec P, [float bias]);
+ "texture" => (false, false, TextureLevelType::None),
+ // textureGrad(gsampler, gvec P, gvec dPdx, gvec dPdy);
+ "textureGrad" => (false, false, TextureLevelType::Grad),
+ // textureGradOffset(gsampler, gvec P, gvec dPdx, gvec dPdy, ivec offset);
+ "textureGradOffset" => (false, true, TextureLevelType::Grad),
+ // textureLod(gsampler, gvec P, float lod);
+ "textureLod" => (false, false, TextureLevelType::Lod),
+ // textureLodOffset(gsampler, gvec P, float lod, ivec offset);
+ "textureLodOffset" => (false, true, TextureLevelType::Lod),
+ // textureOffset(gsampler, gvec+1 P, ivec offset, [float bias]);
+ "textureOffset" => (false, true, TextureLevelType::None),
+ // textureProj(gsampler, gvec+1 P, [float bias]);
+ "textureProj" => (true, false, TextureLevelType::None),
+ // textureProjGrad(gsampler, gvec+1 P, gvec dPdx, gvec dPdy);
+ "textureProjGrad" => (true, false, TextureLevelType::Grad),
+ // textureProjGradOffset(gsampler, gvec+1 P, gvec dPdx, gvec dPdy, ivec offset);
+ "textureProjGradOffset" => (true, true, TextureLevelType::Grad),
+ // textureProjLod(gsampler, gvec+1 P, float lod);
+ "textureProjLod" => (true, false, TextureLevelType::Lod),
+                    // textureProjLodOffset(gsampler, gvec+1 P, float lod, ivec offset);
+ "textureProjLodOffset" => (true, true, TextureLevelType::Lod),
+ // textureProjOffset(gsampler, gvec+1 P, ivec offset, [float bias]);
+ "textureProjOffset" => (true, true, TextureLevelType::None),
+ _ => unreachable!(),
+ };
+
+ let builtin = MacroCall::Texture {
+ proj,
+ offset,
+ shadow,
+ level_type,
+ };
+
+ // Parse out the variant settings.
+ let grad = level_type == TextureLevelType::Grad;
+ let lod = level_type == TextureLevelType::Lod;
+
+ let supports_variant = proj && !shadow;
+ if variant && !supports_variant {
+ continue;
+ }
+
+ if bias && !matches!(level_type, TextureLevelType::None) {
+ continue;
+ }
+
+ // Proj doesn't work with arrayed or Cube
+ if proj && (arrayed || dim == Dim::Cube) {
+ continue;
+ }
+
+ // texture operations with offset are not supported for cube maps
+ if dim == Dim::Cube && offset {
+ continue;
+ }
+
+ // sampler2DArrayShadow can't be used in textureLod or in texture with bias
+ if (lod || bias) && arrayed && shadow && dim == Dim::D2 {
+ continue;
+ }
+
+ // TODO: glsl supports using bias with depth samplers but naga doesn't
+ if bias && shadow {
+ continue;
+ }
+
+ let class = match shadow {
+ true => ImageClass::Depth { multi },
+ false => ImageClass::Sampled { kind, multi },
+ };
+
+ let image = TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ };
+
+ let num_coords_from_dim = image_dims_to_coords_size(dim).min(3);
+ let mut num_coords = num_coords_from_dim;
+
+ if shadow && proj {
+ num_coords = 4;
+ } else if dim == Dim::D1 && shadow {
+ num_coords = 3;
+ } else if shadow {
+ num_coords += 1;
+ } else if proj {
+ if variant && num_coords == 4 {
+ // Normal form already has 4 components, no need to have a variant form.
+ continue;
+ } else if variant {
+ num_coords = 4;
+ } else {
+ num_coords += 1;
+ }
+ }
+
+ if !(dim == Dim::D1 && shadow) {
+ num_coords += arrayed as usize;
+ }
+
+ // Special case: texture(gsamplerCubeArrayShadow) kicks the shadow compare ref to a separate argument,
+ // since it would otherwise take five arguments. It also can't take a bias, nor can it be proj/grad/lod/offset
+ // (presumably because nobody asked for it, and implementation complexity?)
+ if num_coords >= 5 {
+ if lod || grad || offset || proj || bias {
+ continue;
+ }
+ debug_assert!(dim == Dim::Cube && shadow && arrayed);
+ }
+ debug_assert!(num_coords <= 5);
+
+ let vector = make_coords_arg(num_coords, Sk::Float);
+ let mut args = vec![image, vector];
+
+ if num_coords == 5 {
+ args.push(TypeInner::Scalar {
+ kind: Sk::Float,
+ width,
+ });
+ }
+
+ match level_type {
+ TextureLevelType::Lod => {
+ args.push(TypeInner::Scalar {
+ kind: Sk::Float,
+ width,
+ });
+ }
+ TextureLevelType::Grad => {
+ args.push(make_coords_arg(num_coords_from_dim, Sk::Float));
+ args.push(make_coords_arg(num_coords_from_dim, Sk::Float));
+ }
+ TextureLevelType::None => {}
+ };
+
+ if offset {
+ args.push(make_coords_arg(num_coords_from_dim, Sk::Sint));
+ }
+
+ if bias {
+ args.push(TypeInner::Scalar {
+ kind: Sk::Float,
+ width,
+ });
+ }
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, builtin));
+ }
+ };
+
+ texture_args_generator(TextureArgsOptions::SHADOW | variations.into(), f)
+ }
+ "textureSize" => {
+ let f = |kind, dim, arrayed, multi, shadow| {
+ let class = match shadow {
+ true => ImageClass::Depth { multi },
+ false => ImageClass::Sampled { kind, multi },
+ };
+
+ let image = TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ };
+
+ let mut args = vec![image];
+
+ if !multi {
+ args.push(TypeInner::Scalar {
+ kind: Sk::Sint,
+ width,
+ })
+ }
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::TextureSize { arrayed }))
+ };
+
+ texture_args_generator(
+ TextureArgsOptions::SHADOW | TextureArgsOptions::MULTI | variations.into(),
+ f,
+ )
+ }
+ "texelFetch" | "texelFetchOffset" => {
+ let offset = "texelFetchOffset" == name;
+ let f = |kind, dim, arrayed, multi, _shadow| {
+ // Cube images aren't supported
+ if let Dim::Cube = dim {
+ return;
+ }
+
+ let image = TypeInner::Image {
+ dim,
+ arrayed,
+ class: ImageClass::Sampled { kind, multi },
+ };
+
+ let dim_value = image_dims_to_coords_size(dim);
+ let coordinates = make_coords_arg(dim_value + arrayed as usize, Sk::Sint);
+
+ let mut args = vec![
+ image,
+ coordinates,
+ TypeInner::Scalar {
+ kind: Sk::Sint,
+ width,
+ },
+ ];
+
+ if offset {
+ args.push(make_coords_arg(dim_value, Sk::Sint));
+ }
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::ImageLoad { multi }))
+ };
+
+ // Don't generate shadow images since they aren't supported
+ texture_args_generator(TextureArgsOptions::MULTI | variations.into(), f)
+ }
+ "imageSize" => {
+ let f = |kind: Sk, dim, arrayed, _, _| {
+            // Naga doesn't support cube images and their usefulness
+            // is questionable, so they won't be supported for now
+ if dim == Dim::Cube {
+ return;
+ }
+
+ let image = TypeInner::Image {
+ dim,
+ arrayed,
+ class: ImageClass::Storage {
+ format: kind.dummy_storage_format(),
+ access: crate::StorageAccess::empty(),
+ },
+ };
+
+ declaration
+ .overloads
+ .push(module.add_builtin(vec![image], MacroCall::TextureSize { arrayed }))
+ };
+
+ texture_args_generator(variations.into(), f)
+ }
+ "imageLoad" => {
+ let f = |kind: Sk, dim, arrayed, _, _| {
+            // Naga doesn't support cube images and their usefulness
+            // is questionable, so they won't be supported for now
+ if dim == Dim::Cube {
+ return;
+ }
+
+ let image = TypeInner::Image {
+ dim,
+ arrayed,
+ class: ImageClass::Storage {
+ format: kind.dummy_storage_format(),
+ access: crate::StorageAccess::LOAD,
+ },
+ };
+
+ let dim_value = image_dims_to_coords_size(dim);
+ let mut coord_size = dim_value + arrayed as usize;
+ // > Every OpenGL API call that operates on cubemap array
+ // > textures takes layer-faces, not array layers
+ //
+ // So this means that imageCubeArray only takes a three component
+ // vector coordinate and the third component is a layer index.
+ if Dim::Cube == dim && arrayed {
+ coord_size = 3
+ }
+ let coordinates = make_coords_arg(coord_size, Sk::Sint);
+
+ let args = vec![image, coordinates];
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::ImageLoad { multi: false }))
+ };
+
+ // Don't generate shadow nor multisampled images since they aren't supported
+ texture_args_generator(variations.into(), f)
+ }
+ "imageStore" => {
+ let f = |kind: Sk, dim, arrayed, _, _| {
+            // Naga doesn't support cube images and their usefulness
+            // is questionable, so they won't be supported for now
+ if dim == Dim::Cube {
+ return;
+ }
+
+ let image = TypeInner::Image {
+ dim,
+ arrayed,
+ class: ImageClass::Storage {
+ format: kind.dummy_storage_format(),
+ access: crate::StorageAccess::STORE,
+ },
+ };
+
+ let dim_value = image_dims_to_coords_size(dim);
+ let mut coord_size = dim_value + arrayed as usize;
+ // > Every OpenGL API call that operates on cubemap array
+ // > textures takes layer-faces, not array layers
+ //
+ // So this means that imageCubeArray only takes a three component
+ // vector coordinate and the third component is a layer index.
+ if Dim::Cube == dim && arrayed {
+ coord_size = 3
+ }
+ let coordinates = make_coords_arg(coord_size, Sk::Sint);
+
+ let args = vec![
+ image,
+ coordinates,
+ TypeInner::Vector {
+ size: VectorSize::Quad,
+ kind,
+ width,
+ },
+ ];
+
+ let mut overload = module.add_builtin(args, MacroCall::ImageStore);
+ overload.void = true;
+ declaration.overloads.push(overload)
+ };
+
+ // Don't generate shadow nor multisampled images since they aren't supported
+ texture_args_generator(variations.into(), f)
+ }
+ _ => {}
+ }
+}
+
+/// Injects the builtins that don't need any special variations into the declaration
+fn inject_standard_builtins(
+ declaration: &mut FunctionDeclaration,
+ module: &mut Module,
+ name: &str,
+) {
+ let width = 4;
+ match name {
+ "sampler1D" | "sampler1DArray" | "sampler2D" | "sampler2DArray" | "sampler2DMS"
+ | "sampler2DMSArray" | "sampler3D" | "samplerCube" | "samplerCubeArray" => {
+ declaration.overloads.push(module.add_builtin(
+ vec![
+ TypeInner::Image {
+ dim: match name {
+ "sampler1D" | "sampler1DArray" => Dim::D1,
+ "sampler2D" | "sampler2DArray" | "sampler2DMS" | "sampler2DMSArray" => {
+ Dim::D2
+ }
+ "sampler3D" => Dim::D3,
+ _ => Dim::Cube,
+ },
+ arrayed: matches!(
+ name,
+ "sampler1DArray"
+ | "sampler2DArray"
+ | "sampler2DMSArray"
+ | "samplerCubeArray"
+ ),
+ class: ImageClass::Sampled {
+ kind: Sk::Float,
+ multi: matches!(name, "sampler2DMS" | "sampler2DMSArray"),
+ },
+ },
+ TypeInner::Sampler { comparison: false },
+ ],
+ MacroCall::Sampler,
+ ))
+ }
+ "sampler1DShadow"
+ | "sampler1DArrayShadow"
+ | "sampler2DShadow"
+ | "sampler2DArrayShadow"
+ | "samplerCubeShadow"
+ | "samplerCubeArrayShadow" => {
+ let dim = match name {
+ "sampler1DShadow" | "sampler1DArrayShadow" => Dim::D1,
+ "sampler2DShadow" | "sampler2DArrayShadow" => Dim::D2,
+ _ => Dim::Cube,
+ };
+ let arrayed = matches!(
+ name,
+ "sampler1DArrayShadow" | "sampler2DArrayShadow" | "samplerCubeArrayShadow"
+ );
+
+ for i in 0..2 {
+ let ty = TypeInner::Image {
+ dim,
+ arrayed,
+ class: match i {
+ 0 => ImageClass::Sampled {
+ kind: Sk::Float,
+ multi: false,
+ },
+ _ => ImageClass::Depth { multi: false },
+ },
+ };
+
+ declaration.overloads.push(module.add_builtin(
+ vec![ty, TypeInner::Sampler { comparison: true }],
+ MacroCall::SamplerShadow,
+ ))
+ }
+ }
+ "sin" | "exp" | "exp2" | "sinh" | "cos" | "cosh" | "tan" | "tanh" | "acos" | "asin"
+ | "log" | "log2" | "radians" | "degrees" | "asinh" | "acosh" | "atanh"
+ | "floatBitsToInt" | "floatBitsToUint" | "dFdx" | "dFdxFine" | "dFdxCoarse" | "dFdy"
+ | "dFdyFine" | "dFdyCoarse" | "fwidth" | "fwidthFine" | "fwidthCoarse" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+ let kind = Sk::Float;
+
+ declaration.overloads.push(module.add_builtin(
+ vec![match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ }],
+ match name {
+ "sin" => MacroCall::MathFunction(MathFunction::Sin),
+ "exp" => MacroCall::MathFunction(MathFunction::Exp),
+ "exp2" => MacroCall::MathFunction(MathFunction::Exp2),
+ "sinh" => MacroCall::MathFunction(MathFunction::Sinh),
+ "cos" => MacroCall::MathFunction(MathFunction::Cos),
+ "cosh" => MacroCall::MathFunction(MathFunction::Cosh),
+ "tan" => MacroCall::MathFunction(MathFunction::Tan),
+ "tanh" => MacroCall::MathFunction(MathFunction::Tanh),
+ "acos" => MacroCall::MathFunction(MathFunction::Acos),
+ "asin" => MacroCall::MathFunction(MathFunction::Asin),
+ "log" => MacroCall::MathFunction(MathFunction::Log),
+ "log2" => MacroCall::MathFunction(MathFunction::Log2),
+ "asinh" => MacroCall::MathFunction(MathFunction::Asinh),
+ "acosh" => MacroCall::MathFunction(MathFunction::Acosh),
+ "atanh" => MacroCall::MathFunction(MathFunction::Atanh),
+ "radians" => MacroCall::MathFunction(MathFunction::Radians),
+ "degrees" => MacroCall::MathFunction(MathFunction::Degrees),
+ "floatBitsToInt" => MacroCall::BitCast(Sk::Sint),
+ "floatBitsToUint" => MacroCall::BitCast(Sk::Uint),
+ "dFdxCoarse" => MacroCall::Derivate(Axis::X, Ctrl::Coarse),
+ "dFdyCoarse" => MacroCall::Derivate(Axis::Y, Ctrl::Coarse),
+ "fwidthCoarse" => MacroCall::Derivate(Axis::Width, Ctrl::Coarse),
+ "dFdxFine" => MacroCall::Derivate(Axis::X, Ctrl::Fine),
+ "dFdyFine" => MacroCall::Derivate(Axis::Y, Ctrl::Fine),
+ "fwidthFine" => MacroCall::Derivate(Axis::Width, Ctrl::Fine),
+ "dFdx" => MacroCall::Derivate(Axis::X, Ctrl::None),
+ "dFdy" => MacroCall::Derivate(Axis::Y, Ctrl::None),
+ "fwidth" => MacroCall::Derivate(Axis::Width, Ctrl::None),
+ _ => unreachable!(),
+ },
+ ))
+ }
+ }
+ "intBitsToFloat" | "uintBitsToFloat" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+ let kind = match name {
+ "intBitsToFloat" => Sk::Sint,
+ _ => Sk::Uint,
+ };
+
+ declaration.overloads.push(module.add_builtin(
+ vec![match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ }],
+ MacroCall::BitCast(Sk::Float),
+ ))
+ }
+ }
+ "pow" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+ let kind = Sk::Float;
+ let ty = || match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+
+ declaration.overloads.push(
+ module
+ .add_builtin(vec![ty(), ty()], MacroCall::MathFunction(MathFunction::Pow)),
+ )
+ }
+ }
+ "abs" | "sign" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ // bit 2 - float/sint
+ for bits in 0..0b1000 {
+ let size = match bits & 0b11 {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+ let kind = match bits >> 2 {
+ 0b0 => Sk::Float,
+ _ => Sk::Sint,
+ };
+
+ let args = vec![match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ }];
+
+ declaration.overloads.push(module.add_builtin(
+ args,
+ MacroCall::MathFunction(match name {
+ "abs" => MathFunction::Abs,
+ "sign" => MathFunction::Sign,
+ _ => unreachable!(),
+ }),
+ ))
+ }
+ }
+ "bitCount" | "bitfieldReverse" | "bitfieldExtract" | "bitfieldInsert" | "findLSB"
+ | "findMSB" => {
+ let fun = match name {
+ "bitCount" => MathFunction::CountOneBits,
+ "bitfieldReverse" => MathFunction::ReverseBits,
+ "bitfieldExtract" => MathFunction::ExtractBits,
+ "bitfieldInsert" => MathFunction::InsertBits,
+ "findLSB" => MathFunction::FindLsb,
+ "findMSB" => MathFunction::FindMsb,
+ _ => unreachable!(),
+ };
+
+ let mc = match fun {
+ MathFunction::ExtractBits => MacroCall::BitfieldExtract,
+ MathFunction::InsertBits => MacroCall::BitfieldInsert,
+ _ => MacroCall::MathFunction(fun),
+ };
+
+ // bits layout
+ // bit 0 - int/uint
+ // bit 1 through 2 - dims
+ for bits in 0..0b1000 {
+ let kind = match bits & 0b1 {
+ 0b0 => Sk::Sint,
+ _ => Sk::Uint,
+ };
+ let size = match bits >> 1 {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+
+ let ty = || match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+
+ let mut args = vec![ty()];
+
+ match fun {
+ MathFunction::ExtractBits => {
+ args.push(TypeInner::Scalar {
+ kind: Sk::Sint,
+ width: 4,
+ });
+ args.push(TypeInner::Scalar {
+ kind: Sk::Sint,
+ width: 4,
+ });
+ }
+ MathFunction::InsertBits => {
+ args.push(ty());
+ args.push(TypeInner::Scalar {
+ kind: Sk::Sint,
+ width: 4,
+ });
+ args.push(TypeInner::Scalar {
+ kind: Sk::Sint,
+ width: 4,
+ });
+ }
+ _ => {}
+ }
+
+ // we need to cast the return type of findLsb / findMsb
+ let mc = if kind == Sk::Uint {
+ match mc {
+ MacroCall::MathFunction(MathFunction::FindLsb) => MacroCall::FindLsbUint,
+ MacroCall::MathFunction(MathFunction::FindMsb) => MacroCall::FindMsbUint,
+ mc => mc,
+ }
+ } else {
+ mc
+ };
+
+ declaration.overloads.push(module.add_builtin(args, mc))
+ }
+ }
+ "packSnorm4x8" | "packUnorm4x8" | "packSnorm2x16" | "packUnorm2x16" | "packHalf2x16" => {
+ let fun = match name {
+ "packSnorm4x8" => MathFunction::Pack4x8snorm,
+ "packUnorm4x8" => MathFunction::Pack4x8unorm,
+ "packSnorm2x16" => MathFunction::Pack2x16unorm,
+ "packUnorm2x16" => MathFunction::Pack2x16snorm,
+ "packHalf2x16" => MathFunction::Pack2x16float,
+ _ => unreachable!(),
+ };
+
+ let ty = match fun {
+ MathFunction::Pack4x8snorm | MathFunction::Pack4x8unorm => TypeInner::Vector {
+ size: crate::VectorSize::Quad,
+ kind: Sk::Float,
+ width: 4,
+ },
+ MathFunction::Pack2x16unorm
+ | MathFunction::Pack2x16snorm
+ | MathFunction::Pack2x16float => TypeInner::Vector {
+ size: crate::VectorSize::Bi,
+ kind: Sk::Float,
+ width: 4,
+ },
+ _ => unreachable!(),
+ };
+
+ let args = vec![ty];
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::MathFunction(fun)));
+ }
+ "unpackSnorm4x8" | "unpackUnorm4x8" | "unpackSnorm2x16" | "unpackUnorm2x16"
+ | "unpackHalf2x16" => {
+ let fun = match name {
+ "unpackSnorm4x8" => MathFunction::Unpack4x8snorm,
+ "unpackUnorm4x8" => MathFunction::Unpack4x8unorm,
+ "unpackSnorm2x16" => MathFunction::Unpack2x16snorm,
+ "unpackUnorm2x16" => MathFunction::Unpack2x16unorm,
+ "unpackHalf2x16" => MathFunction::Unpack2x16float,
+ _ => unreachable!(),
+ };
+
+ let args = vec![TypeInner::Scalar {
+ kind: Sk::Uint,
+ width: 4,
+ }];
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::MathFunction(fun)));
+ }
+ "atan" => {
+ // bits layout
+ // bit 0 - atan/atan2
+ // bit 1 through 2 - dims
+ for bits in 0..0b1000 {
+ let fun = match bits & 0b1 {
+ 0b0 => MathFunction::Atan,
+ _ => MathFunction::Atan2,
+ };
+ let size = match bits >> 1 {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+ let kind = Sk::Float;
+ let ty = || match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+
+ let mut args = vec![ty()];
+
+ if fun == MathFunction::Atan2 {
+ args.push(ty())
+ }
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::MathFunction(fun)))
+ }
+ }
+ "all" | "any" | "not" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b11 {
+ let size = match bits {
+ 0b00 => VectorSize::Bi,
+ 0b01 => VectorSize::Tri,
+ _ => VectorSize::Quad,
+ };
+
+ let args = vec![TypeInner::Vector {
+ size,
+ kind: Sk::Bool,
+ width: crate::BOOL_WIDTH,
+ }];
+
+ let fun = match name {
+ "all" => MacroCall::Relational(RelationalFunction::All),
+ "any" => MacroCall::Relational(RelationalFunction::Any),
+ "not" => MacroCall::Unary(UnaryOperator::Not),
+ _ => unreachable!(),
+ };
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "lessThan" | "greaterThan" | "lessThanEqual" | "greaterThanEqual" => {
+ for bits in 0..0b1001 {
+ let (size, kind) = match bits {
+ 0b0000 => (VectorSize::Bi, Sk::Float),
+ 0b0001 => (VectorSize::Tri, Sk::Float),
+ 0b0010 => (VectorSize::Quad, Sk::Float),
+ 0b0011 => (VectorSize::Bi, Sk::Sint),
+ 0b0100 => (VectorSize::Tri, Sk::Sint),
+ 0b0101 => (VectorSize::Quad, Sk::Sint),
+ 0b0110 => (VectorSize::Bi, Sk::Uint),
+ 0b0111 => (VectorSize::Tri, Sk::Uint),
+ _ => (VectorSize::Quad, Sk::Uint),
+ };
+
+ let ty = || TypeInner::Vector { size, kind, width };
+ let args = vec![ty(), ty()];
+
+ let fun = MacroCall::Binary(match name {
+ "lessThan" => BinaryOperator::Less,
+ "greaterThan" => BinaryOperator::Greater,
+ "lessThanEqual" => BinaryOperator::LessEqual,
+ "greaterThanEqual" => BinaryOperator::GreaterEqual,
+ _ => unreachable!(),
+ });
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "equal" | "notEqual" => {
+ for bits in 0..0b1100 {
+ let (size, kind) = match bits {
+ 0b0000 => (VectorSize::Bi, Sk::Float),
+ 0b0001 => (VectorSize::Tri, Sk::Float),
+ 0b0010 => (VectorSize::Quad, Sk::Float),
+ 0b0011 => (VectorSize::Bi, Sk::Sint),
+ 0b0100 => (VectorSize::Tri, Sk::Sint),
+ 0b0101 => (VectorSize::Quad, Sk::Sint),
+ 0b0110 => (VectorSize::Bi, Sk::Uint),
+ 0b0111 => (VectorSize::Tri, Sk::Uint),
+ 0b1000 => (VectorSize::Quad, Sk::Uint),
+ 0b1001 => (VectorSize::Bi, Sk::Bool),
+ 0b1010 => (VectorSize::Tri, Sk::Bool),
+ _ => (VectorSize::Quad, Sk::Bool),
+ };
+
+ let width = if let Sk::Bool = kind {
+ crate::BOOL_WIDTH
+ } else {
+ width
+ };
+
+ let ty = || TypeInner::Vector { size, kind, width };
+ let args = vec![ty(), ty()];
+
+ let fun = MacroCall::Binary(match name {
+ "equal" => BinaryOperator::Equal,
+ "notEqual" => BinaryOperator::NotEqual,
+ _ => unreachable!(),
+ });
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "min" | "max" => {
+ // bits layout
+ // bit 0 through 1 - scalar kind
+ // bit 2 through 4 - dims
+ for bits in 0..0b11100 {
+ let kind = match bits & 0b11 {
+ 0b00 => Sk::Float,
+ 0b01 => Sk::Sint,
+ 0b10 => Sk::Uint,
+ _ => continue,
+ };
+ let (size, second_size) = match bits >> 2 {
+ 0b000 => (None, None),
+ 0b001 => (Some(VectorSize::Bi), None),
+ 0b010 => (Some(VectorSize::Tri), None),
+ 0b011 => (Some(VectorSize::Quad), None),
+ 0b100 => (Some(VectorSize::Bi), Some(VectorSize::Bi)),
+ 0b101 => (Some(VectorSize::Tri), Some(VectorSize::Tri)),
+ _ => (Some(VectorSize::Quad), Some(VectorSize::Quad)),
+ };
+
+ let args = vec![
+ match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ },
+ match second_size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ },
+ ];
+
+ let fun = match name {
+ "max" => MacroCall::Splatted(MathFunction::Max, size, 1),
+ "min" => MacroCall::Splatted(MathFunction::Min, size, 1),
+ _ => unreachable!(),
+ };
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "mix" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ // bit 2 through 4 - types
+ //
+ // 0b10011 is the last element since splatted single elements
+ // were already added
+ for bits in 0..0b10011 {
+ let size = match bits & 0b11 {
+ 0b00 => Some(VectorSize::Bi),
+ 0b01 => Some(VectorSize::Tri),
+ 0b10 => Some(VectorSize::Quad),
+ _ => None,
+ };
+ let (kind, splatted, boolean) = match bits >> 2 {
+ 0b000 => (Sk::Sint, false, true),
+ 0b001 => (Sk::Uint, false, true),
+ 0b010 => (Sk::Float, false, true),
+ 0b011 => (Sk::Float, false, false),
+ _ => (Sk::Float, true, false),
+ };
+
+ let ty = |kind, width| match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+ let args = vec![
+ ty(kind, width),
+ ty(kind, width),
+ match (boolean, splatted) {
+ (true, _) => ty(Sk::Bool, crate::BOOL_WIDTH),
+ (_, false) => TypeInner::Scalar { kind, width },
+ _ => ty(kind, width),
+ },
+ ];
+
+ declaration.overloads.push(module.add_builtin(
+ args,
+ match boolean {
+ true => MacroCall::MixBoolean,
+ false => MacroCall::Splatted(MathFunction::Mix, size, 2),
+ },
+ ))
+ }
+ }
+ "clamp" => {
+ // bits layout
+ // bit 0 through 1 - float/int/uint
+ // bit 2 through 3 - dims
+ // bit 4 - splatted
+ //
+ // 0b11010 is the last element since splatted single elements
+ // were already added
+ for bits in 0..0b11011 {
+ let kind = match bits & 0b11 {
+ 0b00 => Sk::Float,
+ 0b01 => Sk::Sint,
+ 0b10 => Sk::Uint,
+ _ => continue,
+ };
+ let size = match (bits >> 2) & 0b11 {
+ 0b00 => Some(VectorSize::Bi),
+ 0b01 => Some(VectorSize::Tri),
+ 0b10 => Some(VectorSize::Quad),
+ _ => None,
+ };
+ let splatted = bits & 0b10000 == 0b10000;
+
+ let base_ty = || match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+ let limit_ty = || match splatted {
+ true => TypeInner::Scalar { kind, width },
+ false => base_ty(),
+ };
+
+ let args = vec![base_ty(), limit_ty(), limit_ty()];
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::Clamp(size)))
+ }
+ }
+ "barrier" => declaration
+ .overloads
+ .push(module.add_builtin(Vec::new(), MacroCall::Barrier)),
+ // Add common builtins with floats
+ _ => inject_common_builtin(declaration, module, name, 4),
+ }
+}
+
+/// Injects the builtins that need doubles into the declaration
+fn inject_double_builtin(declaration: &mut FunctionDeclaration, module: &mut Module, name: &str) {
+ let width = 8;
+ match name {
+ "abs" | "sign" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+ let kind = Sk::Float;
+
+ let args = vec![match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ }];
+
+ declaration.overloads.push(module.add_builtin(
+ args,
+ MacroCall::MathFunction(match name {
+ "abs" => MathFunction::Abs,
+ "sign" => MathFunction::Sign,
+ _ => unreachable!(),
+ }),
+ ))
+ }
+ }
+ "min" | "max" => {
+ // bits layout
+ // bit 0 through 2 - dims
+ for bits in 0..0b111 {
+ let (size, second_size) = match bits {
+ 0b000 => (None, None),
+ 0b001 => (Some(VectorSize::Bi), None),
+ 0b010 => (Some(VectorSize::Tri), None),
+ 0b011 => (Some(VectorSize::Quad), None),
+ 0b100 => (Some(VectorSize::Bi), Some(VectorSize::Bi)),
+ 0b101 => (Some(VectorSize::Tri), Some(VectorSize::Tri)),
+ _ => (Some(VectorSize::Quad), Some(VectorSize::Quad)),
+ };
+ let kind = Sk::Float;
+
+ let args = vec![
+ match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ },
+ match second_size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ },
+ ];
+
+ let fun = match name {
+ "max" => MacroCall::Splatted(MathFunction::Max, size, 1),
+ "min" => MacroCall::Splatted(MathFunction::Min, size, 1),
+ _ => unreachable!(),
+ };
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "mix" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ // bit 2 through 3 - splatted/boolean
+ //
+            // 0b1010 is the last element since splatted overloads with single
+            // elements are equal to the normal single-element overloads
+ for bits in 0..0b1011 {
+ let size = match bits & 0b11 {
+ 0b00 => Some(VectorSize::Quad),
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => None,
+ };
+ let kind = Sk::Float;
+ let (splatted, boolean) = match bits >> 2 {
+ 0b00 => (false, false),
+ 0b01 => (false, true),
+ _ => (true, false),
+ };
+
+ let ty = |kind, width| match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+ let args = vec![
+ ty(kind, width),
+ ty(kind, width),
+ match (boolean, splatted) {
+ (true, _) => ty(Sk::Bool, crate::BOOL_WIDTH),
+ (_, false) => TypeInner::Scalar { kind, width },
+ _ => ty(kind, width),
+ },
+ ];
+
+ declaration.overloads.push(module.add_builtin(
+ args,
+ match boolean {
+ true => MacroCall::MixBoolean,
+ false => MacroCall::Splatted(MathFunction::Mix, size, 2),
+ },
+ ))
+ }
+ }
+ "clamp" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ // bit 2 - splatted
+ //
+            // 0b110 is the last element since splatted overloads with single
+            // elements are equal to the normal single-element overloads
+ for bits in 0..0b111 {
+ let kind = Sk::Float;
+ let size = match bits & 0b11 {
+ 0b00 => Some(VectorSize::Bi),
+ 0b01 => Some(VectorSize::Tri),
+ 0b10 => Some(VectorSize::Quad),
+ _ => None,
+ };
+ let splatted = bits & 0b100 == 0b100;
+
+ let base_ty = || match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+ let limit_ty = || match splatted {
+ true => TypeInner::Scalar { kind, width },
+ false => base_ty(),
+ };
+
+ let args = vec![base_ty(), limit_ty(), limit_ty()];
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::Clamp(size)))
+ }
+ }
+ "lessThan" | "greaterThan" | "lessThanEqual" | "greaterThanEqual" | "equal"
+ | "notEqual" => {
+ for bits in 0..0b11 {
+ let (size, kind) = match bits {
+ 0b00 => (VectorSize::Bi, Sk::Float),
+ 0b01 => (VectorSize::Tri, Sk::Float),
+ _ => (VectorSize::Quad, Sk::Float),
+ };
+
+ let ty = || TypeInner::Vector { size, kind, width };
+ let args = vec![ty(), ty()];
+
+ let fun = MacroCall::Binary(match name {
+ "lessThan" => BinaryOperator::Less,
+ "greaterThan" => BinaryOperator::Greater,
+ "lessThanEqual" => BinaryOperator::LessEqual,
+ "greaterThanEqual" => BinaryOperator::GreaterEqual,
+ "equal" => BinaryOperator::Equal,
+ "notEqual" => BinaryOperator::NotEqual,
+ _ => unreachable!(),
+ });
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ // Add common builtins with doubles
+ _ => inject_common_builtin(declaration, module, name, 8),
+ }
+}
+
+/// Injects the builtins that can use either floats or doubles into the declaration
+fn inject_common_builtin(
+ declaration: &mut FunctionDeclaration,
+ module: &mut Module,
+ name: &str,
+ float_width: crate::Bytes,
+) {
+ match name {
+ "ceil" | "round" | "roundEven" | "floor" | "fract" | "trunc" | "sqrt" | "inversesqrt"
+ | "normalize" | "length" | "isinf" | "isnan" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+
+ let args = vec![match size {
+ Some(size) => TypeInner::Vector {
+ size,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ None => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ }];
+
+ let fun = match name {
+ "ceil" => MacroCall::MathFunction(MathFunction::Ceil),
+ "round" | "roundEven" => MacroCall::MathFunction(MathFunction::Round),
+ "floor" => MacroCall::MathFunction(MathFunction::Floor),
+ "fract" => MacroCall::MathFunction(MathFunction::Fract),
+ "trunc" => MacroCall::MathFunction(MathFunction::Trunc),
+ "sqrt" => MacroCall::MathFunction(MathFunction::Sqrt),
+ "inversesqrt" => MacroCall::MathFunction(MathFunction::InverseSqrt),
+ "normalize" => MacroCall::MathFunction(MathFunction::Normalize),
+ "length" => MacroCall::MathFunction(MathFunction::Length),
+ "isinf" => MacroCall::Relational(RelationalFunction::IsInf),
+ "isnan" => MacroCall::Relational(RelationalFunction::IsNan),
+ _ => unreachable!(),
+ };
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "dot" | "reflect" | "distance" | "ldexp" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+ let ty = || match size {
+ Some(size) => TypeInner::Vector {
+ size,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ None => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ };
+
+ let fun = match name {
+ "dot" => MacroCall::MathFunction(MathFunction::Dot),
+ "reflect" => MacroCall::MathFunction(MathFunction::Reflect),
+ "distance" => MacroCall::MathFunction(MathFunction::Distance),
+ "ldexp" => MacroCall::MathFunction(MathFunction::Ldexp),
+ _ => unreachable!(),
+ };
+
+ declaration
+ .overloads
+ .push(module.add_builtin(vec![ty(), ty()], fun))
+ }
+ }
+ "transpose" => {
+ // bits layout
+ // bit 0 through 3 - dims
+ for bits in 0..0b1001 {
+ let (rows, columns) = match bits {
+ 0b0000 => (VectorSize::Bi, VectorSize::Bi),
+ 0b0001 => (VectorSize::Bi, VectorSize::Tri),
+ 0b0010 => (VectorSize::Bi, VectorSize::Quad),
+ 0b0011 => (VectorSize::Tri, VectorSize::Bi),
+ 0b0100 => (VectorSize::Tri, VectorSize::Tri),
+ 0b0101 => (VectorSize::Tri, VectorSize::Quad),
+ 0b0110 => (VectorSize::Quad, VectorSize::Bi),
+ 0b0111 => (VectorSize::Quad, VectorSize::Tri),
+ _ => (VectorSize::Quad, VectorSize::Quad),
+ };
+
+ declaration.overloads.push(module.add_builtin(
+ vec![TypeInner::Matrix {
+ columns,
+ rows,
+ width: float_width,
+ }],
+ MacroCall::MathFunction(MathFunction::Transpose),
+ ))
+ }
+ }
+ "inverse" | "determinant" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b11 {
+ let (rows, columns) = match bits {
+ 0b00 => (VectorSize::Bi, VectorSize::Bi),
+ 0b01 => (VectorSize::Tri, VectorSize::Tri),
+ _ => (VectorSize::Quad, VectorSize::Quad),
+ };
+
+ let args = vec![TypeInner::Matrix {
+ columns,
+ rows,
+ width: float_width,
+ }];
+
+ declaration.overloads.push(module.add_builtin(
+ args,
+ MacroCall::MathFunction(match name {
+ "inverse" => MathFunction::Inverse,
+ "determinant" => MathFunction::Determinant,
+ _ => unreachable!(),
+ }),
+ ))
+ }
+ }
+ "mod" | "step" => {
+ // bits layout
+ // bit 0 through 2 - dims
+ for bits in 0..0b111 {
+ let (size, second_size) = match bits {
+ 0b000 => (None, None),
+ 0b001 => (Some(VectorSize::Bi), None),
+ 0b010 => (Some(VectorSize::Tri), None),
+ 0b011 => (Some(VectorSize::Quad), None),
+ 0b100 => (Some(VectorSize::Bi), Some(VectorSize::Bi)),
+ 0b101 => (Some(VectorSize::Tri), Some(VectorSize::Tri)),
+ _ => (Some(VectorSize::Quad), Some(VectorSize::Quad)),
+ };
+
+ let mut args = Vec::with_capacity(2);
+ let step = name == "step";
+
+ for i in 0..2 {
+ let maybe_size = match i == step as u32 {
+ true => size,
+ false => second_size,
+ };
+
+ args.push(match maybe_size {
+ Some(size) => TypeInner::Vector {
+ size,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ None => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ })
+ }
+
+ let fun = match name {
+ "mod" => MacroCall::Mod(size),
+ "step" => MacroCall::Splatted(MathFunction::Step, size, 0),
+ _ => unreachable!(),
+ };
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "modf" | "frexp" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+
+ let ty = module.types.insert(
+ Type {
+ name: None,
+ inner: match size {
+ Some(size) => TypeInner::Vector {
+ size,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ None => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ },
+ },
+ Span::default(),
+ );
+
+ let parameters = vec![ty, ty];
+
+ let fun = match name {
+ "modf" => MacroCall::MathFunction(MathFunction::Modf),
+ "frexp" => MacroCall::MathFunction(MathFunction::Frexp),
+ _ => unreachable!(),
+ };
+
+ declaration.overloads.push(Overload {
+ parameters,
+ parameters_info: vec![
+ ParameterInfo {
+ qualifier: ParameterQualifier::In,
+ depth: false,
+ },
+ ParameterInfo {
+ qualifier: ParameterQualifier::Out,
+ depth: false,
+ },
+ ],
+ kind: FunctionKind::Macro(fun),
+ defined: false,
+ internal: true,
+ void: false,
+ })
+ }
+ }
+ "cross" => {
+ let args = vec![
+ TypeInner::Vector {
+ size: VectorSize::Tri,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ TypeInner::Vector {
+ size: VectorSize::Tri,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ ];
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::MathFunction(MathFunction::Cross)))
+ }
+ "outerProduct" => {
+ // bits layout
+ // bit 0 through 3 - dims
+ for bits in 0..0b1001 {
+ let (size1, size2) = match bits {
+ 0b0000 => (VectorSize::Bi, VectorSize::Bi),
+ 0b0001 => (VectorSize::Bi, VectorSize::Tri),
+ 0b0010 => (VectorSize::Bi, VectorSize::Quad),
+ 0b0011 => (VectorSize::Tri, VectorSize::Bi),
+ 0b0100 => (VectorSize::Tri, VectorSize::Tri),
+ 0b0101 => (VectorSize::Tri, VectorSize::Quad),
+ 0b0110 => (VectorSize::Quad, VectorSize::Bi),
+ 0b0111 => (VectorSize::Quad, VectorSize::Tri),
+ _ => (VectorSize::Quad, VectorSize::Quad),
+ };
+
+ let args = vec![
+ TypeInner::Vector {
+ size: size1,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ TypeInner::Vector {
+ size: size2,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ ];
+
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::MathFunction(MathFunction::Outer)))
+ }
+ }
+ "faceforward" | "fma" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+
+ let ty = || match size {
+ Some(size) => TypeInner::Vector {
+ size,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ None => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ };
+ let args = vec![ty(), ty(), ty()];
+
+ let fun = match name {
+ "faceforward" => MacroCall::MathFunction(MathFunction::FaceForward),
+ "fma" => MacroCall::MathFunction(MathFunction::Fma),
+ _ => unreachable!(),
+ };
+
+ declaration.overloads.push(module.add_builtin(args, fun))
+ }
+ }
+ "refract" => {
+ // bits layout
+ // bit 0 through 1 - dims
+ for bits in 0..0b100 {
+ let size = match bits {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+
+ let ty = || match size {
+ Some(size) => TypeInner::Vector {
+ size,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ None => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ };
+ let args = vec![
+ ty(),
+ ty(),
+ TypeInner::Scalar {
+ kind: Sk::Float,
+ width: 4,
+ },
+ ];
+ declaration
+ .overloads
+ .push(module.add_builtin(args, MacroCall::MathFunction(MathFunction::Refract)))
+ }
+ }
+ "smoothstep" => {
+            // bits layout
+            // bit 0 - splatted
+ // bit 1 through 2 - dims
+ for bits in 0..0b1000 {
+ let splatted = bits & 0b1 == 0b1;
+ let size = match bits >> 1 {
+ 0b00 => None,
+ 0b01 => Some(VectorSize::Bi),
+ 0b10 => Some(VectorSize::Tri),
+ _ => Some(VectorSize::Quad),
+ };
+
+ if splatted && size.is_none() {
+ continue;
+ }
+
+ let base_ty = || match size {
+ Some(size) => TypeInner::Vector {
+ size,
+ kind: Sk::Float,
+ width: float_width,
+ },
+ None => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ };
+ let ty = || match splatted {
+ true => TypeInner::Scalar {
+ kind: Sk::Float,
+ width: float_width,
+ },
+ false => base_ty(),
+ };
+ declaration.overloads.push(module.add_builtin(
+ vec![ty(), ty(), base_ty()],
+ MacroCall::SmoothStep { splatted: size },
+ ))
+ }
+ }
+ // The function isn't a builtin or we don't yet support it
+ _ => {}
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub enum TextureLevelType {
+ None,
+ Lod,
+ Grad,
+}
+
+/// A compiler defined builtin function
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub enum MacroCall {
+ Sampler,
+ SamplerShadow,
+ Texture {
+ proj: bool,
+ offset: bool,
+ shadow: bool,
+ level_type: TextureLevelType,
+ },
+ TextureSize {
+ arrayed: bool,
+ },
+ ImageLoad {
+ multi: bool,
+ },
+ ImageStore,
+ MathFunction(MathFunction),
+ FindLsbUint,
+ FindMsbUint,
+ BitfieldExtract,
+ BitfieldInsert,
+ Relational(RelationalFunction),
+ Unary(UnaryOperator),
+ Binary(BinaryOperator),
+ Mod(Option<VectorSize>),
+ Splatted(MathFunction, Option<VectorSize>, usize),
+ MixBoolean,
+ Clamp(Option<VectorSize>),
+ BitCast(Sk),
+ Derivate(Axis, Ctrl),
+ Barrier,
+    /// SmoothStep needs a separate variant because it might need its inputs
+    /// to be splatted depending on the overload
+ SmoothStep {
+        /// The size of the splat operation, if any
+ splatted: Option<VectorSize>,
+ },
+}
+
+impl MacroCall {
+    /// Adds the necessary expressions and statements to the passed body and
+    /// returns the resulting expression, if any
+ pub fn call(
+ &self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ body: &mut Block,
+ args: &mut [Handle<Expression>],
+ meta: Span,
+ ) -> Result<Option<Handle<Expression>>> {
+ Ok(Some(match *self {
+ MacroCall::Sampler => {
+ ctx.samplers.insert(args[0], args[1]);
+ args[0]
+ }
+ MacroCall::SamplerShadow => {
+ sampled_to_depth(
+ &mut frontend.module,
+ ctx,
+ args[0],
+ meta,
+ &mut frontend.errors,
+ );
+ frontend.invalidate_expression(ctx, args[0], meta)?;
+ ctx.samplers.insert(args[0], args[1]);
+ args[0]
+ }
+ MacroCall::Texture {
+ proj,
+ offset,
+ shadow,
+ level_type,
+ } => {
+ let mut coords = args[1];
+
+ if proj {
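+                    // Projective texturing: divide the coordinates by their
+                    // last component (e.g. a `textureProj` call with `vec3 P`
+                    // samples at `P.xy / P.z`).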
+ let size = match *frontend.resolve_type(ctx, coords, meta)? {
+ TypeInner::Vector { size, .. } => size,
+ _ => unreachable!(),
+ };
+ let mut right = ctx.add_expression(
+ Expression::AccessIndex {
+ base: coords,
+ index: size as u32 - 1,
+ },
+ Span::default(),
+ body,
+ );
+ let left = if let VectorSize::Bi = size {
+ ctx.add_expression(
+ Expression::AccessIndex {
+ base: coords,
+ index: 0,
+ },
+ Span::default(),
+ body,
+ )
+ } else {
+ let size = match size {
+ VectorSize::Tri => VectorSize::Bi,
+ _ => VectorSize::Tri,
+ };
+ right = ctx.add_expression(
+ Expression::Splat { size, value: right },
+ Span::default(),
+ body,
+ );
+ ctx.vector_resize(size, coords, Span::default(), body)
+ };
+ coords = ctx.add_expression(
+ Expression::Binary {
+ op: BinaryOperator::Divide,
+ left,
+ right,
+ },
+ Span::default(),
+ body,
+ );
+ }
+
+ let extra = args.get(2).copied();
+ let comps =
+ frontend.coordinate_components(ctx, args[0], coords, extra, meta, body)?;
+
+ let mut num_args = 2;
+
+ if comps.used_extra {
+ num_args += 1;
+ };
+
+ // Parse out explicit texture level.
+ let mut level = match level_type {
+ TextureLevelType::None => SampleLevel::Auto,
+
+ TextureLevelType::Lod => {
+ num_args += 1;
+
+ if shadow {
+ log::warn!("Assuming LOD {:?} is zero", args[2],);
+
+ SampleLevel::Zero
+ } else {
+ SampleLevel::Exact(args[2])
+ }
+ }
+
+ TextureLevelType::Grad => {
+ num_args += 2;
+
+ if shadow {
+ log::warn!(
+ "Assuming gradients {:?} and {:?} are not greater than 1",
+ args[2],
+ args[3],
+ );
+ SampleLevel::Zero
+ } else {
+ SampleLevel::Gradient {
+ x: args[2],
+ y: args[3],
+ }
+ }
+ }
+ };
+
+ let texture_offset = match offset {
+ true => {
+ let offset_arg = args[num_args];
+ num_args += 1;
+ match frontend.solve_constant(ctx, offset_arg, meta) {
+ Ok(v) => Some(v),
+ Err(e) => {
+ frontend.errors.push(e);
+ None
+ }
+ }
+ }
+ false => None,
+ };
+
+ // Now go back and look for optional bias arg (if available)
+ if let TextureLevelType::None = level_type {
+ level = args
+ .get(num_args)
+ .copied()
+ .map_or(SampleLevel::Auto, SampleLevel::Bias);
+ }
+
+ texture_call(ctx, args[0], level, comps, texture_offset, body, meta)?
+ }
+
+ MacroCall::TextureSize { arrayed } => {
+ let mut expr = ctx.add_expression(
+ Expression::ImageQuery {
+ image: args[0],
+ query: ImageQuery::Size {
+ level: args.get(1).copied(),
+ },
+ },
+ Span::default(),
+ body,
+ );
+
+ if arrayed {
+ let mut components = Vec::with_capacity(4);
+
+ let size = match *frontend.resolve_type(ctx, expr, meta)? {
+ TypeInner::Vector { size: ori_size, .. } => {
+ for index in 0..(ori_size as u32) {
+ components.push(ctx.add_expression(
+ Expression::AccessIndex { base: expr, index },
+ Span::default(),
+ body,
+ ))
+ }
+
+ match ori_size {
+ VectorSize::Bi => VectorSize::Tri,
+ _ => VectorSize::Quad,
+ }
+ }
+ _ => {
+ components.push(expr);
+ VectorSize::Bi
+ }
+ };
+
+ components.push(ctx.add_expression(
+ Expression::ImageQuery {
+ image: args[0],
+ query: ImageQuery::NumLayers,
+ },
+ Span::default(),
+ body,
+ ));
+
+ let ty = frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector {
+ size,
+ kind: crate::ScalarKind::Uint,
+ width: 4,
+ },
+ },
+ Span::default(),
+ );
+
+ expr = ctx.add_expression(Expression::Compose { components, ty }, meta, body)
+ }
+
+ ctx.add_expression(
+ Expression::As {
+ expr,
+ kind: Sk::Sint,
+ convert: Some(4),
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::ImageLoad { multi } => {
+ let comps =
+ frontend.coordinate_components(ctx, args[0], args[1], None, meta, body)?;
+ let (sample, level) = match (multi, args.get(2)) {
+ (_, None) => (None, None),
+ (true, Some(&arg)) => (Some(arg), None),
+ (false, Some(&arg)) => (None, Some(arg)),
+ };
+ ctx.add_expression(
+ Expression::ImageLoad {
+ image: args[0],
+ coordinate: comps.coordinate,
+ array_index: comps.array_index,
+ sample,
+ level,
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::ImageStore => {
+ let comps =
+ frontend.coordinate_components(ctx, args[0], args[1], None, meta, body)?;
+ ctx.emit_restart(body);
+ body.push(
+ crate::Statement::ImageStore {
+ image: args[0],
+ coordinate: comps.coordinate,
+ array_index: comps.array_index,
+ value: args[2],
+ },
+ meta,
+ );
+ return Ok(None);
+ }
+ MacroCall::MathFunction(fun) => ctx.add_expression(
+ Expression::Math {
+ fun,
+ arg: args[0],
+ arg1: args.get(1).copied(),
+ arg2: args.get(2).copied(),
+ arg3: args.get(3).copied(),
+ },
+ Span::default(),
+ body,
+ ),
+ mc @ (MacroCall::FindLsbUint | MacroCall::FindMsbUint) => {
+ let fun = match mc {
+ MacroCall::FindLsbUint => MathFunction::FindLsb,
+ MacroCall::FindMsbUint => MathFunction::FindMsb,
+ _ => unreachable!(),
+ };
+ let res = ctx.add_expression(
+ Expression::Math {
+ fun,
+ arg: args[0],
+ arg1: None,
+ arg2: None,
+ arg3: None,
+ },
+ Span::default(),
+ body,
+ );
+ ctx.add_expression(
+ Expression::As {
+ expr: res,
+ kind: Sk::Sint,
+ convert: Some(4),
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::BitfieldInsert => {
+ let conv_arg_2 = ctx.add_expression(
+ Expression::As {
+ expr: args[2],
+ kind: Sk::Uint,
+ convert: Some(4),
+ },
+ Span::default(),
+ body,
+ );
+ let conv_arg_3 = ctx.add_expression(
+ Expression::As {
+ expr: args[3],
+ kind: Sk::Uint,
+ convert: Some(4),
+ },
+ Span::default(),
+ body,
+ );
+ ctx.add_expression(
+ Expression::Math {
+ fun: MathFunction::InsertBits,
+ arg: args[0],
+ arg1: Some(args[1]),
+ arg2: Some(conv_arg_2),
+ arg3: Some(conv_arg_3),
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::BitfieldExtract => {
+ let conv_arg_1 = ctx.add_expression(
+ Expression::As {
+ expr: args[1],
+ kind: Sk::Uint,
+ convert: Some(4),
+ },
+ Span::default(),
+ body,
+ );
+ let conv_arg_2 = ctx.add_expression(
+ Expression::As {
+ expr: args[2],
+ kind: Sk::Uint,
+ convert: Some(4),
+ },
+ Span::default(),
+ body,
+ );
+ ctx.add_expression(
+ Expression::Math {
+ fun: MathFunction::ExtractBits,
+ arg: args[0],
+ arg1: Some(conv_arg_1),
+ arg2: Some(conv_arg_2),
+ arg3: None,
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::Relational(fun) => ctx.add_expression(
+ Expression::Relational {
+ fun,
+ argument: args[0],
+ },
+ Span::default(),
+ body,
+ ),
+ MacroCall::Unary(op) => ctx.add_expression(
+ Expression::Unary { op, expr: args[0] },
+ Span::default(),
+ body,
+ ),
+ MacroCall::Binary(op) => ctx.add_expression(
+ Expression::Binary {
+ op,
+ left: args[0],
+ right: args[1],
+ },
+ Span::default(),
+ body,
+ ),
+ MacroCall::Mod(size) => {
+ ctx.implicit_splat(frontend, &mut args[1], meta, size)?;
+
+ // x - y * floor(x / y)
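+ // (this is the GLSL definition of mod(); unlike a truncated remainder
+ // the result follows the sign of `y`, e.g.
+ // mod(-1.0, 3.0) = -1.0 - 3.0 * floor(-1.0 / 3.0) = 2.0)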
+
+ let div = ctx.add_expression(
+ Expression::Binary {
+ op: BinaryOperator::Divide,
+ left: args[0],
+ right: args[1],
+ },
+ Span::default(),
+ body,
+ );
+ let floor = ctx.add_expression(
+ Expression::Math {
+ fun: MathFunction::Floor,
+ arg: div,
+ arg1: None,
+ arg2: None,
+ arg3: None,
+ },
+ Span::default(),
+ body,
+ );
+ let mult = ctx.add_expression(
+ Expression::Binary {
+ op: BinaryOperator::Multiply,
+ left: floor,
+ right: args[1],
+ },
+ Span::default(),
+ body,
+ );
+ ctx.add_expression(
+ Expression::Binary {
+ op: BinaryOperator::Subtract,
+ left: args[0],
+ right: mult,
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::Splatted(fun, size, i) => {
+ ctx.implicit_splat(frontend, &mut args[i], meta, size)?;
+
+ ctx.add_expression(
+ Expression::Math {
+ fun,
+ arg: args[0],
+ arg1: args.get(1).copied(),
+ arg2: args.get(2).copied(),
+ arg3: args.get(3).copied(),
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::MixBoolean => ctx.add_expression(
+ Expression::Select {
+ condition: args[2],
+ accept: args[1],
+ reject: args[0],
+ },
+ Span::default(),
+ body,
+ ),
+ MacroCall::Clamp(size) => {
+ ctx.implicit_splat(frontend, &mut args[1], meta, size)?;
+ ctx.implicit_splat(frontend, &mut args[2], meta, size)?;
+
+ ctx.add_expression(
+ Expression::Math {
+ fun: MathFunction::Clamp,
+ arg: args[0],
+ arg1: args.get(1).copied(),
+ arg2: args.get(2).copied(),
+ arg3: args.get(3).copied(),
+ },
+ Span::default(),
+ body,
+ )
+ }
+ MacroCall::BitCast(kind) => ctx.add_expression(
+ Expression::As {
+ expr: args[0],
+ kind,
+ convert: None,
+ },
+ Span::default(),
+ body,
+ ),
+ MacroCall::Derivate(axis, ctrl) => ctx.add_expression(
+ Expression::Derivative {
+ axis,
+ ctrl,
+ expr: args[0],
+ },
+ Span::default(),
+ body,
+ ),
+ MacroCall::Barrier => {
+ ctx.emit_restart(body);
+ body.push(crate::Statement::Barrier(crate::Barrier::all()), meta);
+ return Ok(None);
+ }
+ MacroCall::SmoothStep { splatted } => {
+ ctx.implicit_splat(frontend, &mut args[0], meta, splatted)?;
+ ctx.implicit_splat(frontend, &mut args[1], meta, splatted)?;
+
+ ctx.add_expression(
+ Expression::Math {
+ fun: MathFunction::SmoothStep,
+ arg: args[0],
+ arg1: args.get(1).copied(),
+ arg2: args.get(2).copied(),
+ arg3: None,
+ },
+ Span::default(),
+ body,
+ )
+ }
+ }))
+ }
+}
+
+fn texture_call(
+ ctx: &mut Context,
+ image: Handle<Expression>,
+ level: SampleLevel,
+ comps: CoordComponents,
+ offset: Option<Handle<Constant>>,
+ body: &mut Block,
+ meta: Span,
+) -> Result<Handle<Expression>> {
+ if let Some(sampler) = ctx.samplers.get(&image).copied() {
+ let mut array_index = comps.array_index;
+
+ if let Some(ref mut array_index_expr) = array_index {
+ ctx.conversion(array_index_expr, meta, Sk::Sint, 4)?;
+ }
+
+ Ok(ctx.add_expression(
+ Expression::ImageSample {
+ image,
+ sampler,
+ gather: None, //TODO
+ coordinate: comps.coordinate,
+ array_index,
+ offset,
+ level,
+ depth_ref: comps.depth_ref,
+ },
+ meta,
+ body,
+ ))
+ } else {
+ Err(Error {
+ kind: ErrorKind::SemanticError("Bad call".into()),
+ meta,
+ })
+ }
+}
+
+/// Helper struct for texture calls, holding the separate components extracted from the coordinate vector argument
+///
+/// Obtained by calling [`coordinate_components`](Frontend::coordinate_components)
+#[derive(Debug)]
+struct CoordComponents {
+ coordinate: Handle<Expression>,
+ depth_ref: Option<Handle<Expression>>,
+ array_index: Option<Handle<Expression>>,
+ used_extra: bool,
+}
+
+impl Frontend {
+ /// Helper function for texture calls; splits the vector argument into its components
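+ ///
+ /// For example (illustrative): for a `sampler2DArrayShadow` call the
+ /// coordinate is a `vec4(x, y, layer, compare)`, which is split here into
+ /// `coordinate = vec2(x, y)`, `array_index = layer` and
+ /// `depth_ref = compare`.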
+ fn coordinate_components(
+ &mut self,
+ ctx: &mut Context,
+ image: Handle<Expression>,
+ coord: Handle<Expression>,
+ extra: Option<Handle<Expression>>,
+ meta: Span,
+ body: &mut Block,
+ ) -> Result<CoordComponents> {
+ if let TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ } = *self.resolve_type(ctx, image, meta)?
+ {
+ let image_size = match dim {
+ Dim::D1 => None,
+ Dim::D2 => Some(VectorSize::Bi),
+ Dim::D3 => Some(VectorSize::Tri),
+ Dim::Cube => Some(VectorSize::Tri),
+ };
+ let coord_size = match *self.resolve_type(ctx, coord, meta)? {
+ TypeInner::Vector { size, .. } => Some(size),
+ _ => None,
+ };
+ let (shadow, storage) = match class {
+ ImageClass::Depth { .. } => (true, false),
+ ImageClass::Storage { .. } => (false, true),
+ ImageClass::Sampled { .. } => (false, false),
+ };
+
+ let coordinate = match (image_size, coord_size) {
+ (Some(size), Some(coord_s)) if size != coord_s => {
+ ctx.vector_resize(size, coord, Span::default(), body)
+ }
+ (None, Some(_)) => ctx.add_expression(
+ Expression::AccessIndex {
+ base: coord,
+ index: 0,
+ },
+ Span::default(),
+ body,
+ ),
+ _ => coord,
+ };
+
+ let mut coord_index = image_size.map_or(1, |s| s as u32);
+
+ let array_index = if arrayed && !(storage && dim == Dim::Cube) {
+ let index = coord_index;
+ coord_index += 1;
+
+ Some(ctx.add_expression(
+ Expression::AccessIndex { base: coord, index },
+ Span::default(),
+ body,
+ ))
+ } else {
+ None
+ };
+ let mut used_extra = false;
+ let depth_ref = match shadow {
+ true => {
+ let index = coord_index;
+
+ if index == 4 {
+ used_extra = true;
+ extra
+ } else {
+ Some(ctx.add_expression(
+ Expression::AccessIndex { base: coord, index },
+ Span::default(),
+ body,
+ ))
+ }
+ }
+ false => None,
+ };
+
+ Ok(CoordComponents {
+ coordinate,
+ depth_ref,
+ array_index,
+ used_extra,
+ })
+ } else {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError("Type is not an image".into()),
+ meta,
+ });
+
+ Ok(CoordComponents {
+ coordinate: coord,
+ depth_ref: None,
+ array_index: None,
+ used_extra: false,
+ })
+ }
+ }
+}
+
+/// Helper function to cast an expression holding a sampled image to a
+/// depth image.
+pub fn sampled_to_depth(
+ module: &mut Module,
+ ctx: &mut Context,
+ image: Handle<Expression>,
+ meta: Span,
+ errors: &mut Vec<Error>,
+) {
+ // Get a mutable type handle of the underlying image storage
+ let ty = match ctx[image] {
+ Expression::GlobalVariable(handle) => &mut module.global_variables.get_mut(handle).ty,
+ Expression::FunctionArgument(i) => {
+ // Mark the function argument as carrying a depth texture
+ ctx.parameters_info[i as usize].depth = true;
+ // NOTE: We also need to change the parameter type later
+ &mut ctx.arguments[i as usize].ty
+ }
+ _ => {
+ // Only globals and function arguments are allowed to carry an image
+ return errors.push(Error {
+ kind: ErrorKind::SemanticError("Not a valid texture expression".into()),
+ meta,
+ });
+ }
+ };
+
+ match module.types[*ty].inner {
+ // Update the image class to depth if it isn't already
+ TypeInner::Image {
+ class,
+ dim,
+ arrayed,
+ } => match class {
+ ImageClass::Sampled { multi, .. } => {
+ *ty = module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Image {
+ dim,
+ arrayed,
+ class: ImageClass::Depth { multi },
+ },
+ },
+ Span::default(),
+ )
+ }
+ ImageClass::Depth { .. } => {}
+ // Other image classes aren't allowed to be transformed to depth
+ ImageClass::Storage { .. } => errors.push(Error {
+ kind: ErrorKind::SemanticError("Not a texture".into()),
+ meta,
+ }),
+ },
+ _ => errors.push(Error {
+ kind: ErrorKind::SemanticError("Not a texture".into()),
+ meta,
+ }),
+ };
+
+ // Copy the handle to allow borrowing the `ctx` again
+ let ty = *ty;
+
+ // If the image was passed through a function argument we also need to change
+ // the corresponding parameter
+ if let Expression::FunctionArgument(i) = ctx[image] {
+ ctx.parameters[i as usize] = ty;
+ }
+}
+
+bitflags::bitflags! {
+ /// Influences the operation of `texture_args_generator`
+ struct TextureArgsOptions: u32 {
+ /// Generates multisampled variants of images
+ const MULTI = 1 << 0;
+ /// Generates shadow variants of images
+ const SHADOW = 1 << 1;
+ /// Generates standard images
+ const STANDARD = 1 << 2;
+ /// Generates cube arrayed images
+ const CUBE_ARRAY = 1 << 3;
+ /// Generates 2D multisampled arrayed images
+ const D2_MULTI_ARRAY = 1 << 4;
+ }
+}
+
+impl From<BuiltinVariations> for TextureArgsOptions {
+ fn from(variations: BuiltinVariations) -> Self {
+ let mut options = TextureArgsOptions::empty();
+ if variations.contains(BuiltinVariations::STANDARD) {
+ options |= TextureArgsOptions::STANDARD
+ }
+ if variations.contains(BuiltinVariations::CUBE_TEXTURES_ARRAY) {
+ options |= TextureArgsOptions::CUBE_ARRAY
+ }
+ if variations.contains(BuiltinVariations::D2_MULTI_TEXTURES_ARRAY) {
+ options |= TextureArgsOptions::D2_MULTI_ARRAY
+ }
+ options
+ }
+}
+
+/// Helper function to generate the image components for texture/image builtins
+///
+/// Calls the passed function `f` with:
+/// ```text
+/// f(ScalarKind, ImageDimension, arrayed, multi, shadow)
+/// ```
+///
+/// `options` controls the generation of extra image variants like multisampling and depth;
+/// see the struct documentation
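+///
+/// For instance (illustrative), with `STANDARD | SHADOW` set this yields,
+/// among others, `f(Float, D2, false, false, false)` (a `sampler2D`) and
+/// `f(Float, D2, false, false, true)` (a `sampler2DShadow`).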
+fn texture_args_generator(
+ options: TextureArgsOptions,
+ mut f: impl FnMut(crate::ScalarKind, Dim, bool, bool, bool),
+) {
+ for kind in [Sk::Float, Sk::Uint, Sk::Sint].iter().copied() {
+ for dim in [Dim::D1, Dim::D2, Dim::D3, Dim::Cube].iter().copied() {
+ for arrayed in [false, true].iter().copied() {
+ if dim == Dim::Cube && arrayed {
+ if !options.contains(TextureArgsOptions::CUBE_ARRAY) {
+ continue;
+ }
+ } else if Dim::D2 == dim
+ && options.contains(TextureArgsOptions::MULTI)
+ && arrayed
+ && options.contains(TextureArgsOptions::D2_MULTI_ARRAY)
+ {
+ // multisampling for sampler2DMSArray
+ f(kind, dim, arrayed, true, false);
+ } else if !options.contains(TextureArgsOptions::STANDARD) {
+ continue;
+ }
+
+ f(kind, dim, arrayed, false, false);
+
+ // 3D images can be neither arrayed nor shadow,
+ // so we break out early; this way arrayed will always
+ // be false and we won't hit the shadow branch
+ if let Dim::D3 = dim {
+ break;
+ }
+
+ if Dim::D2 == dim && options.contains(TextureArgsOptions::MULTI) && !arrayed {
+ // multisampling
+ f(kind, dim, arrayed, true, false);
+ }
+
+ if Sk::Float == kind && options.contains(TextureArgsOptions::SHADOW) {
+ // shadow
+ f(kind, dim, arrayed, false, true);
+ }
+ }
+ }
+ }
+}
+
+/// Helper function used to convert an image dimension into an integer representing the
+/// number of components needed for the coordinates vector (1 means scalar instead of vector)
+const fn image_dims_to_coords_size(dim: Dim) -> usize {
+ match dim {
+ Dim::D1 => 1,
+ Dim::D2 => 2,
+ _ => 3,
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/constants.rs b/third_party/rust/naga/src/front/glsl/constants.rs
new file mode 100644
index 0000000000..045a9c6ffb
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/constants.rs
@@ -0,0 +1,984 @@
+use crate::{
+ arena::{Arena, Handle, UniqueArena},
+ BinaryOperator, Constant, ConstantInner, Expression, ScalarKind, ScalarValue, Type, TypeInner,
+ UnaryOperator,
+};
+
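+/// A minimal usage sketch (here `module`, `function` and `expr_handle` are
+/// placeholders for the module being built, the current function and the
+/// expression to fold):
+///
+/// ```ignore
+/// let mut solver = ConstantSolver {
+///     types: &mut module.types,
+///     expressions: &function.expressions,
+///     constants: &mut module.constants,
+/// };
+/// let constant = solver.solve(expr_handle)?;
+/// ```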
+#[derive(Debug)]
+pub struct ConstantSolver<'a> {
+ pub types: &'a mut UniqueArena<Type>,
+ pub expressions: &'a Arena<Expression>,
+ pub constants: &'a mut Arena<Constant>,
+}
+
+#[derive(Clone, Debug, PartialEq, thiserror::Error)]
+pub enum ConstantSolvingError {
+ #[error("Constants cannot access function arguments")]
+ FunctionArg,
+ #[error("Constants cannot access global variables")]
+ GlobalVariable,
+ #[error("Constants cannot access local variables")]
+ LocalVariable,
+ #[error("Cannot get the array length of a non array type")]
+ InvalidArrayLengthArg,
+ #[error("Constants cannot get the array length of a dynamically sized array")]
+ ArrayLengthDynamic,
+ #[error("Constants cannot call functions")]
+ Call,
+ #[error("Constants don't support atomic functions")]
+ Atomic,
+ #[error("Constants don't support relational functions")]
+ Relational,
+ #[error("Constants don't support derivative functions")]
+ Derivative,
+ #[error("Constants don't support select expressions")]
+ Select,
+ #[error("Constants don't support load expressions")]
+ Load,
+ #[error("Constants don't support image expressions")]
+ ImageExpression,
+ #[error("Constants don't support ray query expressions")]
+ RayQueryExpression,
+ #[error("Cannot access the type")]
+ InvalidAccessBase,
+ #[error("Cannot access at the index")]
+ InvalidAccessIndex,
+ #[error("Cannot access with index of type")]
+ InvalidAccessIndexTy,
+ #[error("Constants don't support bitcasts")]
+ Bitcast,
+ #[error("Cannot cast type")]
+ InvalidCastArg,
+ #[error("Cannot apply the unary op to the argument")]
+ InvalidUnaryOpArg,
+ #[error("Cannot apply the binary op to the arguments")]
+ InvalidBinaryOpArgs,
+ #[error("Cannot apply math function to type")]
+ InvalidMathArg,
+ #[error("Splat is defined only on scalar values")]
+ SplatScalarOnly,
+ #[error("Can only swizzle vector constants")]
+ SwizzleVectorOnly,
+ #[error("Not implemented as constant expression: {0}")]
+ NotImplemented(String),
+}
+
+impl<'a> ConstantSolver<'a> {
+ pub fn solve(
+ &mut self,
+ expr: Handle<Expression>,
+ ) -> Result<Handle<Constant>, ConstantSolvingError> {
+ let span = self.expressions.get_span(expr);
+ match self.expressions[expr] {
+ Expression::Constant(constant) => Ok(constant),
+ Expression::AccessIndex { base, index } => self.access(base, index as usize),
+ Expression::Access { base, index } => {
+ let index = self.solve(index)?;
+
+ self.access(base, self.constant_index(index)?)
+ }
+ Expression::Splat {
+ size,
+ value: splat_value,
+ } => {
+ let value_constant = self.solve(splat_value)?;
+ let ty = match self.constants[value_constant].inner {
+ ConstantInner::Scalar { ref value, width } => {
+ let kind = value.scalar_kind();
+ self.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector { size, kind, width },
+ },
+ span,
+ )
+ }
+ ConstantInner::Composite { .. } => {
+ return Err(ConstantSolvingError::SplatScalarOnly);
+ }
+ };
+
+ let inner = ConstantInner::Composite {
+ ty,
+ components: vec![value_constant; size as usize],
+ };
+ Ok(self.register_constant(inner, span))
+ }
+ Expression::Swizzle {
+ size,
+ vector: src_vector,
+ pattern,
+ } => {
+ let src_constant = self.solve(src_vector)?;
+ let (ty, src_components) = match self.constants[src_constant].inner {
+ ConstantInner::Scalar { .. } => {
+ return Err(ConstantSolvingError::SwizzleVectorOnly);
+ }
+ ConstantInner::Composite {
+ ty,
+ components: ref src_components,
+ } => match self.types[ty].inner {
+ crate::TypeInner::Vector {
+ size: _,
+ kind,
+ width,
+ } => {
+ let dst_ty = self.types.insert(
+ Type {
+ name: None,
+ inner: crate::TypeInner::Vector { size, kind, width },
+ },
+ span,
+ );
+ (dst_ty, &src_components[..])
+ }
+ _ => {
+ return Err(ConstantSolvingError::SwizzleVectorOnly);
+ }
+ },
+ };
+
+ let components = pattern
+ .iter()
+ .map(|&sc| src_components[sc as usize])
+ .collect();
+ let inner = ConstantInner::Composite { ty, components };
+
+ Ok(self.register_constant(inner, span))
+ }
+ Expression::Compose { ty, ref components } => {
+ let components = components
+ .iter()
+ .map(|c| self.solve(*c))
+ .collect::<Result<_, _>>()?;
+ let inner = ConstantInner::Composite { ty, components };
+
+ Ok(self.register_constant(inner, span))
+ }
+ Expression::Unary { expr, op } => {
+ let expr_constant = self.solve(expr)?;
+
+ self.unary_op(op, expr_constant, span)
+ }
+ Expression::Binary { left, right, op } => {
+ let left_constant = self.solve(left)?;
+ let right_constant = self.solve(right)?;
+
+ self.binary_op(op, left_constant, right_constant, span)
+ }
+ Expression::Math {
+ fun,
+ arg,
+ arg1,
+ arg2,
+ ..
+ } => {
+ let arg = self.solve(arg)?;
+ let arg1 = arg1.map(|arg| self.solve(arg)).transpose()?;
+ let arg2 = arg2.map(|arg| self.solve(arg)).transpose()?;
+
+ let const0 = &self.constants[arg].inner;
+ let const1 = arg1.map(|arg| &self.constants[arg].inner);
+ let const2 = arg2.map(|arg| &self.constants[arg].inner);
+
+ match fun {
+ crate::MathFunction::Pow => {
+ let (value, width) = match (const0, const1.unwrap()) {
+ (
+ &ConstantInner::Scalar {
+ width,
+ value: value0,
+ },
+ &ConstantInner::Scalar { value: value1, .. },
+ ) => (
+ match (value0, value1) {
+ (ScalarValue::Sint(a), ScalarValue::Sint(b)) => {
+ ScalarValue::Sint(a.pow(b as u32))
+ }
+ (ScalarValue::Uint(a), ScalarValue::Uint(b)) => {
+ ScalarValue::Uint(a.pow(b as u32))
+ }
+ (ScalarValue::Float(a), ScalarValue::Float(b)) => {
+ ScalarValue::Float(a.powf(b))
+ }
+ _ => return Err(ConstantSolvingError::InvalidMathArg),
+ },
+ width,
+ ),
+ _ => return Err(ConstantSolvingError::InvalidMathArg),
+ };
+
+ let inner = ConstantInner::Scalar { width, value };
+ Ok(self.register_constant(inner, span))
+ }
+ crate::MathFunction::Clamp => {
+ let (value, width) = match (const0, const1.unwrap(), const2.unwrap()) {
+ (
+ &ConstantInner::Scalar {
+ width,
+ value: value0,
+ },
+ &ConstantInner::Scalar { value: value1, .. },
+ &ConstantInner::Scalar { value: value2, .. },
+ ) => (
+ match (value0, value1, value2) {
+ (
+ ScalarValue::Sint(a),
+ ScalarValue::Sint(b),
+ ScalarValue::Sint(c),
+ ) => ScalarValue::Sint(a.clamp(b, c)),
+ (
+ ScalarValue::Uint(a),
+ ScalarValue::Uint(b),
+ ScalarValue::Uint(c),
+ ) => ScalarValue::Uint(a.clamp(b, c)),
+ (
+ ScalarValue::Float(a),
+ ScalarValue::Float(b),
+ ScalarValue::Float(c),
+ ) => ScalarValue::Float(glsl_float_clamp(a, b, c)),
+ _ => return Err(ConstantSolvingError::InvalidMathArg),
+ },
+ width,
+ ),
+ _ => {
+ return Err(ConstantSolvingError::NotImplemented(format!(
+ "{fun:?} applied to vector values"
+ )))
+ }
+ };
+
+ let inner = ConstantInner::Scalar { width, value };
+ Ok(self.register_constant(inner, span))
+ }
+ _ => Err(ConstantSolvingError::NotImplemented(format!("{fun:?}"))),
+ }
+ }
+ Expression::As {
+ convert,
+ expr,
+ kind,
+ } => {
+ let expr_constant = self.solve(expr)?;
+
+ match convert {
+ Some(width) => self.cast(expr_constant, kind, width, span),
+ None => Err(ConstantSolvingError::Bitcast),
+ }
+ }
+ Expression::ArrayLength(expr) => {
+ let array = self.solve(expr)?;
+
+ match self.constants[array].inner {
+ ConstantInner::Scalar { .. } => {
+ Err(ConstantSolvingError::InvalidArrayLengthArg)
+ }
+ ConstantInner::Composite { ty, .. } => match self.types[ty].inner {
+ TypeInner::Array { size, .. } => match size {
+ crate::ArraySize::Constant(constant) => Ok(constant),
+ crate::ArraySize::Dynamic => {
+ Err(ConstantSolvingError::ArrayLengthDynamic)
+ }
+ },
+ _ => Err(ConstantSolvingError::InvalidArrayLengthArg),
+ },
+ }
+ }
+
+ Expression::Load { .. } => Err(ConstantSolvingError::Load),
+ Expression::Select { .. } => Err(ConstantSolvingError::Select),
+ Expression::LocalVariable(_) => Err(ConstantSolvingError::LocalVariable),
+ Expression::Derivative { .. } => Err(ConstantSolvingError::Derivative),
+ Expression::Relational { .. } => Err(ConstantSolvingError::Relational),
+ Expression::CallResult { .. } => Err(ConstantSolvingError::Call),
+ Expression::AtomicResult { .. } => Err(ConstantSolvingError::Atomic),
+ Expression::FunctionArgument(_) => Err(ConstantSolvingError::FunctionArg),
+ Expression::GlobalVariable(_) => Err(ConstantSolvingError::GlobalVariable),
+ Expression::ImageSample { .. }
+ | Expression::ImageLoad { .. }
+ | Expression::ImageQuery { .. } => Err(ConstantSolvingError::ImageExpression),
+ Expression::RayQueryProceedResult | Expression::RayQueryGetIntersection { .. } => {
+ Err(ConstantSolvingError::RayQueryExpression)
+ }
+ }
+ }
+
+ fn access(
+ &mut self,
+ base: Handle<Expression>,
+ index: usize,
+ ) -> Result<Handle<Constant>, ConstantSolvingError> {
+ let base = self.solve(base)?;
+
+ match self.constants[base].inner {
+ ConstantInner::Scalar { .. } => Err(ConstantSolvingError::InvalidAccessBase),
+ ConstantInner::Composite { ty, ref components } => {
+ match self.types[ty].inner {
+ TypeInner::Vector { .. }
+ | TypeInner::Matrix { .. }
+ | TypeInner::Array { .. }
+ | TypeInner::Struct { .. } => (),
+ _ => return Err(ConstantSolvingError::InvalidAccessBase),
+ }
+
+ components
+ .get(index)
+ .copied()
+ .ok_or(ConstantSolvingError::InvalidAccessIndex)
+ }
+ }
+ }
+
+ fn constant_index(&self, constant: Handle<Constant>) -> Result<usize, ConstantSolvingError> {
+ match self.constants[constant].inner {
+ ConstantInner::Scalar {
+ value: ScalarValue::Uint(index),
+ ..
+ } => Ok(index as usize),
+ _ => Err(ConstantSolvingError::InvalidAccessIndexTy),
+ }
+ }
+
+ fn cast(
+ &mut self,
+ constant: Handle<Constant>,
+ kind: ScalarKind,
+ target_width: crate::Bytes,
+ span: crate::Span,
+ ) -> Result<Handle<Constant>, ConstantSolvingError> {
+ let mut inner = self.constants[constant].inner.clone();
+
+ match inner {
+ ConstantInner::Scalar {
+ ref mut value,
+ ref mut width,
+ } => {
+ *width = target_width;
+ *value = match kind {
+ ScalarKind::Sint => ScalarValue::Sint(match *value {
+ ScalarValue::Sint(v) => v,
+ ScalarValue::Uint(v) => v as i64,
+ ScalarValue::Float(v) => v as i64,
+ ScalarValue::Bool(v) => v as i64,
+ }),
+ ScalarKind::Uint => ScalarValue::Uint(match *value {
+ ScalarValue::Sint(v) => v as u64,
+ ScalarValue::Uint(v) => v,
+ ScalarValue::Float(v) => v as u64,
+ ScalarValue::Bool(v) => v as u64,
+ }),
+ ScalarKind::Float => ScalarValue::Float(match *value {
+ ScalarValue::Sint(v) => v as f64,
+ ScalarValue::Uint(v) => v as f64,
+ ScalarValue::Float(v) => v,
+ ScalarValue::Bool(v) => v as u64 as f64,
+ }),
+ ScalarKind::Bool => ScalarValue::Bool(match *value {
+ ScalarValue::Sint(v) => v != 0,
+ ScalarValue::Uint(v) => v != 0,
+ ScalarValue::Float(v) => v != 0.0,
+ ScalarValue::Bool(v) => v,
+ }),
+ }
+ }
+ ConstantInner::Composite {
+ ty,
+ ref mut components,
+ } => {
+ match self.types[ty].inner {
+ TypeInner::Vector { .. } | TypeInner::Matrix { .. } => (),
+ _ => return Err(ConstantSolvingError::InvalidCastArg),
+ }
+
+ for component in components {
+ *component = self.cast(*component, kind, target_width, span)?;
+ }
+ }
+ }
+
+ Ok(self.register_constant(inner, span))
+ }
+
+ fn unary_op(
+ &mut self,
+ op: UnaryOperator,
+ constant: Handle<Constant>,
+ span: crate::Span,
+ ) -> Result<Handle<Constant>, ConstantSolvingError> {
+ let mut inner = self.constants[constant].inner.clone();
+
+ match inner {
+ ConstantInner::Scalar { ref mut value, .. } => match op {
+ UnaryOperator::Negate => match *value {
+ ScalarValue::Sint(ref mut v) => *v = -*v,
+ ScalarValue::Float(ref mut v) => *v = -*v,
+ _ => return Err(ConstantSolvingError::InvalidUnaryOpArg),
+ },
+ UnaryOperator::Not => match *value {
+ ScalarValue::Sint(ref mut v) => *v = !*v,
+ ScalarValue::Uint(ref mut v) => *v = !*v,
+ ScalarValue::Bool(ref mut v) => *v = !*v,
+ ScalarValue::Float(_) => return Err(ConstantSolvingError::InvalidUnaryOpArg),
+ },
+ },
+ ConstantInner::Composite {
+ ty,
+ ref mut components,
+ } => {
+ match self.types[ty].inner {
+ TypeInner::Vector { .. } | TypeInner::Matrix { .. } => (),
+ _ => return Err(ConstantSolvingError::InvalidCastArg),
+ }
+
+ for component in components {
+ *component = self.unary_op(op, *component, span)?
+ }
+ }
+ }
+
+ Ok(self.register_constant(inner, span))
+ }
+
+ fn binary_op(
+ &mut self,
+ op: BinaryOperator,
+ left: Handle<Constant>,
+ right: Handle<Constant>,
+ span: crate::Span,
+ ) -> Result<Handle<Constant>, ConstantSolvingError> {
+ let left_inner = &self.constants[left].inner;
+ let right_inner = &self.constants[right].inner;
+
+ let inner = match (left_inner, right_inner) {
+ (
+ &ConstantInner::Scalar {
+ value: left_value,
+ width,
+ },
+ &ConstantInner::Scalar {
+ value: right_value,
+ width: _,
+ },
+ ) => {
+ let value = match op {
+ BinaryOperator::Equal => ScalarValue::Bool(left_value == right_value),
+ BinaryOperator::NotEqual => ScalarValue::Bool(left_value != right_value),
+ BinaryOperator::Less => ScalarValue::Bool(left_value < right_value),
+ BinaryOperator::LessEqual => ScalarValue::Bool(left_value <= right_value),
+ BinaryOperator::Greater => ScalarValue::Bool(left_value > right_value),
+ BinaryOperator::GreaterEqual => ScalarValue::Bool(left_value >= right_value),
+
+ _ => match (left_value, right_value) {
+ (ScalarValue::Sint(a), ScalarValue::Sint(b)) => {
+ ScalarValue::Sint(match op {
+ BinaryOperator::Add => a.wrapping_add(b),
+ BinaryOperator::Subtract => a.wrapping_sub(b),
+ BinaryOperator::Multiply => a.wrapping_mul(b),
+ BinaryOperator::Divide => a.checked_div(b).unwrap_or(0),
+ BinaryOperator::Modulo => a.checked_rem(b).unwrap_or(0),
+ BinaryOperator::And => a & b,
+ BinaryOperator::ExclusiveOr => a ^ b,
+ BinaryOperator::InclusiveOr => a | b,
+ _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
+ })
+ }
+ (ScalarValue::Sint(a), ScalarValue::Uint(b)) => {
+ ScalarValue::Sint(match op {
+ BinaryOperator::ShiftLeft => a.wrapping_shl(b as u32),
+ BinaryOperator::ShiftRight => a.wrapping_shr(b as u32),
+ _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
+ })
+ }
+ (ScalarValue::Uint(a), ScalarValue::Uint(b)) => {
+ ScalarValue::Uint(match op {
+ BinaryOperator::Add => a.wrapping_add(b),
+ BinaryOperator::Subtract => a.wrapping_sub(b),
+ BinaryOperator::Multiply => a.wrapping_mul(b),
+ BinaryOperator::Divide => a.checked_div(b).unwrap_or(0),
+ BinaryOperator::Modulo => a.checked_rem(b).unwrap_or(0),
+ BinaryOperator::And => a & b,
+ BinaryOperator::ExclusiveOr => a ^ b,
+ BinaryOperator::InclusiveOr => a | b,
+ BinaryOperator::ShiftLeft => a.wrapping_shl(b as u32),
+ BinaryOperator::ShiftRight => a.wrapping_shr(b as u32),
+ _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
+ })
+ }
+ (ScalarValue::Float(a), ScalarValue::Float(b)) => {
+ ScalarValue::Float(match op {
+ BinaryOperator::Add => a + b,
+ BinaryOperator::Subtract => a - b,
+ BinaryOperator::Multiply => a * b,
+ BinaryOperator::Divide => a / b,
+ BinaryOperator::Modulo => a - b * (a / b).floor(),
+ _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
+ })
+ }
+ (ScalarValue::Bool(a), ScalarValue::Bool(b)) => {
+ ScalarValue::Bool(match op {
+ BinaryOperator::LogicalAnd => a && b,
+ BinaryOperator::LogicalOr => a || b,
+ _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
+ })
+ }
+ _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
+ },
+ };
+
+ ConstantInner::Scalar { value, width }
+ }
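+ // A binary operation between a composite (vector/matrix) and a
+ // scalar is applied componentwise, e.g.
+ // vec2(1.0, 2.0) * 2.0 == vec2(2.0, 4.0).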
+ (&ConstantInner::Composite { ref components, ty }, &ConstantInner::Scalar { .. }) => {
+ let mut components = components.clone();
+ for comp in components.iter_mut() {
+ *comp = self.binary_op(op, *comp, right, span)?;
+ }
+ ConstantInner::Composite { ty, components }
+ }
+ (&ConstantInner::Scalar { .. }, &ConstantInner::Composite { ref components, ty }) => {
+ let mut components = components.clone();
+ for comp in components.iter_mut() {
+ *comp = self.binary_op(op, left, *comp, span)?;
+ }
+ ConstantInner::Composite { ty, components }
+ }
+ _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
+ };
+
+ Ok(self.register_constant(inner, span))
+ }
+
+ fn register_constant(&mut self, inner: ConstantInner, span: crate::Span) -> Handle<Constant> {
+ self.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner,
+ },
+ span,
+ )
+ }
+}
+
+/// Helper function to implement the GLSL `max` function for floats.
+///
+/// While Rust does provide a `f64::max` method, it has a different behavior than the
+/// GLSL `max` for NaNs. In Rust, if any of the arguments is a NaN, then the other
+/// is returned.
+///
+/// This leads to different results in the following example:
+/// ```
+/// assert_eq!(std::f64::NAN.max(1.0), 1.0);
+/// ```
+///
+/// Rust will return `1.0` while GLSL should return NaN.
+fn glsl_float_max(x: f64, y: f64) -> f64 {
+ if x < y {
+ y
+ } else {
+ x
+ }
+}
+
+/// Helper function to implement the GLSL `min` function for floats.
+///
+/// While Rust does provide a `f64::min` method, it has a different behavior than the
+/// GLSL `min` for NaNs. In Rust, if any of the arguments is a NaN, then the other
+/// is returned.
+///
+/// This leads to different results in the following example:
+/// ```
+/// assert_eq!(std::f64::NAN.min(1.0), 1.0);
+/// ```
+///
+/// Rust will return `1.0` while GLSL should return NaN.
+fn glsl_float_min(x: f64, y: f64) -> f64 {
+ if y < x {
+ y
+ } else {
+ x
+ }
+}
+
+/// Helper function to implement the GLSL `clamp` function for floats.
+///
+/// While Rust does provide a `f64::clamp` method, it panics if either
+/// `min` or `max` are `NaN`s which is not the behavior specified by
+/// the glsl specification.
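+///
+/// For example, `glsl_float_clamp(1.0, f64::NAN, 2.0)` evaluates to `1.0`,
+/// whereas `1.0f64.clamp(f64::NAN, 2.0)` would panic (see the
+/// `nan_handling` test below).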
+fn glsl_float_clamp(value: f64, min: f64, max: f64) -> f64 {
+ glsl_float_min(glsl_float_max(value, min), max)
+}
+
+#[cfg(test)]
+mod tests {
+ use std::vec;
+
+ use crate::{
+ Arena, Constant, ConstantInner, Expression, ScalarKind, ScalarValue, Type, TypeInner,
+ UnaryOperator, UniqueArena, VectorSize,
+ };
+
+ use super::ConstantSolver;
+
+ #[test]
+ fn nan_handling() {
+ assert!(super::glsl_float_max(f64::NAN, 2.0).is_nan());
+ assert!(!super::glsl_float_max(2.0, f64::NAN).is_nan());
+
+ assert!(super::glsl_float_min(f64::NAN, 2.0).is_nan());
+ assert!(!super::glsl_float_min(2.0, f64::NAN).is_nan());
+
+ assert!(super::glsl_float_clamp(f64::NAN, 1.0, 2.0).is_nan());
+ assert!(!super::glsl_float_clamp(1.0, f64::NAN, 2.0).is_nan());
+ assert!(!super::glsl_float_clamp(1.0, 2.0, f64::NAN).is_nan());
+ }
+
+ #[test]
+ fn unary_op() {
+ let mut types = UniqueArena::new();
+ let mut expressions = Arena::new();
+ let mut constants = Arena::new();
+
+ let vec_ty = types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector {
+ size: VectorSize::Bi,
+ kind: ScalarKind::Sint,
+ width: 4,
+ },
+ },
+ Default::default(),
+ );
+
+ let h = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Sint(4),
+ },
+ },
+ Default::default(),
+ );
+
+ let h1 = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Sint(8),
+ },
+ },
+ Default::default(),
+ );
+
+ let vec_h = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Composite {
+ ty: vec_ty,
+ components: vec![h, h1],
+ },
+ },
+ Default::default(),
+ );
+
+ let expr = expressions.append(Expression::Constant(h), Default::default());
+ let expr1 = expressions.append(Expression::Constant(vec_h), Default::default());
+
+ let root1 = expressions.append(
+ Expression::Unary {
+ op: UnaryOperator::Negate,
+ expr,
+ },
+ Default::default(),
+ );
+
+ let root2 = expressions.append(
+ Expression::Unary {
+ op: UnaryOperator::Not,
+ expr,
+ },
+ Default::default(),
+ );
+
+ let root3 = expressions.append(
+ Expression::Unary {
+ op: UnaryOperator::Not,
+ expr: expr1,
+ },
+ Default::default(),
+ );
+
+ let mut solver = ConstantSolver {
+ types: &mut types,
+ expressions: &expressions,
+ constants: &mut constants,
+ };
+
+ let res1 = solver.solve(root1).unwrap();
+ let res2 = solver.solve(root2).unwrap();
+ let res3 = solver.solve(root3).unwrap();
+
+ assert_eq!(
+ constants[res1].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Sint(-4),
+ },
+ );
+
+ assert_eq!(
+ constants[res2].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Sint(!4),
+ },
+ );
+
+ let res3_inner = &constants[res3].inner;
+
+ match *res3_inner {
+ ConstantInner::Composite {
+ ref ty,
+ ref components,
+ } => {
+ assert_eq!(*ty, vec_ty);
+ let mut components_iter = components.iter().copied();
+ assert_eq!(
+ constants[components_iter.next().unwrap()].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Sint(!4),
+ },
+ );
+ assert_eq!(
+ constants[components_iter.next().unwrap()].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Sint(!8),
+ },
+ );
+ assert!(components_iter.next().is_none());
+ }
+ _ => panic!("Expected vector"),
+ }
+ }
+
+ #[test]
+ fn cast() {
+ let mut expressions = Arena::new();
+ let mut constants = Arena::new();
+
+ let h = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Sint(4),
+ },
+ },
+ Default::default(),
+ );
+
+ let expr = expressions.append(Expression::Constant(h), Default::default());
+
+ let root = expressions.append(
+ Expression::As {
+ expr,
+ kind: ScalarKind::Bool,
+ convert: Some(crate::BOOL_WIDTH),
+ },
+ Default::default(),
+ );
+
+ let mut solver = ConstantSolver {
+ types: &mut UniqueArena::new(),
+ expressions: &expressions,
+ constants: &mut constants,
+ };
+
+ let res = solver.solve(root).unwrap();
+
+ assert_eq!(
+ constants[res].inner,
+ ConstantInner::Scalar {
+ width: crate::BOOL_WIDTH,
+ value: ScalarValue::Bool(true),
+ },
+ );
+ }
+
+ #[test]
+ fn access() {
+ let mut types = UniqueArena::new();
+ let mut expressions = Arena::new();
+ let mut constants = Arena::new();
+
+ let matrix_ty = types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Matrix {
+ columns: VectorSize::Bi,
+ rows: VectorSize::Tri,
+ width: 4,
+ },
+ },
+ Default::default(),
+ );
+
+ let vec_ty = types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector {
+ size: VectorSize::Tri,
+ kind: ScalarKind::Float,
+ width: 4,
+ },
+ },
+ Default::default(),
+ );
+
+ let mut vec1_components = Vec::with_capacity(3);
+ let mut vec2_components = Vec::with_capacity(3);
+
+ for i in 0..3 {
+ let h = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(i as f64),
+ },
+ },
+ Default::default(),
+ );
+
+ vec1_components.push(h)
+ }
+
+ for i in 3..6 {
+ let h = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(i as f64),
+ },
+ },
+ Default::default(),
+ );
+
+ vec2_components.push(h)
+ }
+
+ let vec1 = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Composite {
+ ty: vec_ty,
+ components: vec1_components,
+ },
+ },
+ Default::default(),
+ );
+
+ let vec2 = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Composite {
+ ty: vec_ty,
+ components: vec2_components,
+ },
+ },
+ Default::default(),
+ );
+
+ let h = constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Composite {
+ ty: matrix_ty,
+ components: vec![vec1, vec2],
+ },
+ },
+ Default::default(),
+ );
+
+ let base = expressions.append(Expression::Constant(h), Default::default());
+ let root1 = expressions.append(
+ Expression::AccessIndex { base, index: 1 },
+ Default::default(),
+ );
+ let root2 = expressions.append(
+ Expression::AccessIndex {
+ base: root1,
+ index: 2,
+ },
+ Default::default(),
+ );
+
+ let mut solver = ConstantSolver {
+ types: &mut types,
+ expressions: &expressions,
+ constants: &mut constants,
+ };
+
+ let res1 = solver.solve(root1).unwrap();
+ let res2 = solver.solve(root2).unwrap();
+
+ let res1_inner = &constants[res1].inner;
+
+ match *res1_inner {
+ ConstantInner::Composite {
+ ref ty,
+ ref components,
+ } => {
+ assert_eq!(*ty, vec_ty);
+ let mut components_iter = components.iter().copied();
+ assert_eq!(
+ constants[components_iter.next().unwrap()].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(3.),
+ },
+ );
+ assert_eq!(
+ constants[components_iter.next().unwrap()].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(4.),
+ },
+ );
+ assert_eq!(
+ constants[components_iter.next().unwrap()].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(5.),
+ },
+ );
+ assert!(components_iter.next().is_none());
+ }
+ _ => panic!("Expected vector"),
+ }
+
+ assert_eq!(
+ constants[res2].inner,
+ ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(5.),
+ },
+ );
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/context.rs b/third_party/rust/naga/src/front/glsl/context.rs
new file mode 100644
index 0000000000..6df4850efa
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/context.rs
@@ -0,0 +1,1609 @@
+use super::{
+ ast::{
+ GlobalLookup, GlobalLookupKind, HirExpr, HirExprKind, ParameterInfo, ParameterQualifier,
+ VariableReference,
+ },
+ error::{Error, ErrorKind},
+ types::{scalar_components, type_power},
+ Frontend, Result,
+};
+use crate::{
+ front::{Emitter, Typifier},
+ AddressSpace, Arena, BinaryOperator, Block, Constant, Expression, FastHashMap,
+ FunctionArgument, Handle, LocalVariable, RelationalFunction, ScalarKind, ScalarValue, Span,
+ Statement, Type, TypeInner, VectorSize,
+};
+use std::{convert::TryFrom, ops::Index};
+
+/// The position in which an expression appears, used while lowering
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum ExprPos {
+ /// The expression is in the left hand side of an assignment
+ Lhs,
+ /// The expression is in the right hand side of an assignment
+ Rhs,
+ /// The expression is an array being indexed, needed to allow constant
+ /// arrays to be dynamically indexed
+ AccessBase {
+ /// The index is a constant
+ constant_index: bool,
+ },
+}
+
+impl ExprPos {
+ /// Returns the current position if it is `Lhs` or a non-constant
+ /// `AccessBase`, otherwise returns `AccessBase` with the passed
+ /// `constant_index`
+ const fn maybe_access_base(&self, constant_index: bool) -> Self {
+ match *self {
+ ExprPos::Lhs
+ | ExprPos::AccessBase {
+ constant_index: false,
+ } => *self,
+ _ => ExprPos::AccessBase { constant_index },
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct Context {
+ pub expressions: Arena<Expression>,
+ pub locals: Arena<LocalVariable>,
+
+ /// The [`FunctionArgument`]s for the final [`crate::Function`].
+ ///
+ /// Parameters with the `out` and `inout` qualifiers have [`Pointer`] types
+ /// here. For example, an `inout vec2 a` argument would be a [`Pointer`] to
+ /// a [`Vector`].
+ ///
+ /// [`Pointer`]: crate::TypeInner::Pointer
+ /// [`Vector`]: crate::TypeInner::Vector
+ pub arguments: Vec<FunctionArgument>,
+
+ /// The parameter types given in the source code.
+ ///
+ /// The `out` and `inout` qualifiers don't affect the types that appear
+ /// here. For example, an `inout vec2 a` argument would simply be a
+ /// [`Vector`], not a pointer to one.
+ ///
+ /// [`Vector`]: crate::TypeInner::Vector
+ pub parameters: Vec<Handle<Type>>,
+ pub parameters_info: Vec<ParameterInfo>,
+
+ pub symbol_table: crate::front::SymbolTable<String, VariableReference>,
+ pub samplers: FastHashMap<Handle<Expression>, Handle<Expression>>,
+
+ pub typifier: Typifier,
+ emitter: Emitter,
+ stmt_ctx: Option<StmtContext>,
+}
+
+impl Context {
+ pub fn new(frontend: &Frontend, body: &mut Block) -> Self {
+ let mut this = Context {
+ expressions: Arena::new(),
+ locals: Arena::new(),
+ arguments: Vec::new(),
+
+ parameters: Vec::new(),
+ parameters_info: Vec::new(),
+
+ symbol_table: crate::front::SymbolTable::default(),
+ samplers: FastHashMap::default(),
+
+ typifier: Typifier::new(),
+ emitter: Emitter::default(),
+ stmt_ctx: Some(StmtContext::new()),
+ };
+
+ this.emit_start();
+
+ for &(ref name, lookup) in frontend.global_variables.iter() {
+ this.add_global(frontend, name, lookup, body)
+ }
+
+ this
+ }
+
+ pub fn add_global(
+ &mut self,
+ frontend: &Frontend,
+ name: &str,
+ GlobalLookup {
+ kind,
+ entry_arg,
+ mutable,
+ }: GlobalLookup,
+ body: &mut Block,
+ ) {
+ self.emit_end(body);
+ let (expr, load, constant) = match kind {
+ GlobalLookupKind::Variable(v) => {
+ let span = frontend.module.global_variables.get_span(v);
+ let res = (
+ self.expressions.append(Expression::GlobalVariable(v), span),
+ frontend.module.global_variables[v].space != AddressSpace::Handle,
+ None,
+ );
+ self.emit_start();
+
+ res
+ }
+ GlobalLookupKind::BlockSelect(handle, index) => {
+ let span = frontend.module.global_variables.get_span(handle);
+ let base = self
+ .expressions
+ .append(Expression::GlobalVariable(handle), span);
+ self.emit_start();
+ let expr = self
+ .expressions
+ .append(Expression::AccessIndex { base, index }, span);
+
+ (
+ expr,
+ {
+ let ty = frontend.module.global_variables[handle].ty;
+
+ match frontend.module.types[ty].inner {
+ TypeInner::Struct { ref members, .. } => !matches!(
+ frontend.module.types[members[index as usize].ty].inner,
+ TypeInner::Array {
+ size: crate::ArraySize::Dynamic,
+ ..
+ }
+ ),
+ _ => true,
+ }
+ },
+ None,
+ )
+ }
+ GlobalLookupKind::Constant(v, ty) => {
+ let span = frontend.module.constants.get_span(v);
+ let res = (
+ self.expressions.append(Expression::Constant(v), span),
+ false,
+ Some((v, ty)),
+ );
+ self.emit_start();
+ res
+ }
+ };
+
+ let var = VariableReference {
+ expr,
+ load,
+ mutable,
+ constant,
+ entry_arg,
+ };
+
+ self.symbol_table.add(name.into(), var);
+ }
+
+ /// Starts the expression emitter
+ ///
+ /// # Panics
+ ///
+ /// - If called twice in a row without calling [`emit_end`][Self::emit_end].
+ #[inline]
+ pub fn emit_start(&mut self) {
+ self.emitter.start(&self.expressions)
+ }
+
+ /// Emits all the expressions captured by the emitter to the passed `body`
+ ///
+ /// # Panics
+ ///
+ /// - If called before calling [`emit_start`].
+ /// - If called twice in a row without calling [`emit_start`].
+ ///
+ /// [`emit_start`]: Self::emit_start
+ pub fn emit_end(&mut self, body: &mut Block) {
+ body.extend(self.emitter.finish(&self.expressions))
+ }
+
+ /// Emits all the expressions captured by the emitter to the passed `body`
+ /// and starts the emitter again
+ ///
+ /// # Panics
+ ///
+ /// - If called before calling [`emit_start`][Self::emit_start].
+ pub fn emit_restart(&mut self, body: &mut Block) {
+ self.emit_end(body);
+ self.emit_start()
+ }
+
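+ /// Adds the expression to the arena; if the expression must not be covered
+ /// by an emit range (see [`Expression::needs_pre_emit`]), the expressions
+ /// captured so far are flushed to `body` and the emitter is restarted
+ /// after it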
+ pub fn add_expression(
+ &mut self,
+ expr: Expression,
+ meta: Span,
+ body: &mut Block,
+ ) -> Handle<Expression> {
+ let needs_pre_emit = expr.needs_pre_emit();
+ if needs_pre_emit {
+ self.emit_end(body);
+ }
+ let handle = self.expressions.append(expr, meta);
+ if needs_pre_emit {
+ self.emit_start();
+ }
+ handle
+ }
+
+ /// Adds a variable to the current scope
+ ///
+ /// Returns the previously defined variable if one with the same name already
+ /// existed, otherwise returns `None`
+ pub fn add_local_var(
+ &mut self,
+ name: String,
+ expr: Handle<Expression>,
+ mutable: bool,
+ ) -> Option<VariableReference> {
+ let var = VariableReference {
+ expr,
+ load: true,
+ mutable,
+ constant: None,
+ entry_arg: None,
+ };
+
+ self.symbol_table.add(name, var)
+ }
+
+ /// Add function argument to current scope
+ pub fn add_function_arg(
+ &mut self,
+ frontend: &mut Frontend,
+ body: &mut Block,
+ name_meta: Option<(String, Span)>,
+ ty: Handle<Type>,
+ qualifier: ParameterQualifier,
+ ) {
+ let index = self.arguments.len();
+ let mut arg = FunctionArgument {
+ name: name_meta.as_ref().map(|&(ref name, _)| name.clone()),
+ ty,
+ binding: None,
+ };
+ self.parameters.push(ty);
+
+ let opaque = match frontend.module.types[ty].inner {
+ TypeInner::Image { .. } | TypeInner::Sampler { .. } => true,
+ _ => false,
+ };
+
+ if qualifier.is_lhs() {
+ let span = frontend.module.types.get_span(arg.ty);
+ arg.ty = frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Pointer {
+ base: arg.ty,
+ space: AddressSpace::Function,
+ },
+ },
+ span,
+ )
+ }
+
+ self.arguments.push(arg);
+
+ self.parameters_info.push(ParameterInfo {
+ qualifier,
+ depth: false,
+ });
+
+ if let Some((name, meta)) = name_meta {
+ let expr = self.add_expression(Expression::FunctionArgument(index as u32), meta, body);
+ let mutable = qualifier != ParameterQualifier::Const && !opaque;
+ let load = qualifier.is_lhs();
+
+ let var = if mutable && !load {
+ let handle = self.locals.append(
+ LocalVariable {
+ name: Some(name.clone()),
+ ty,
+ init: None,
+ },
+ meta,
+ );
+ let local_expr = self.add_expression(Expression::LocalVariable(handle), meta, body);
+
+ self.emit_restart(body);
+
+ body.push(
+ Statement::Store {
+ pointer: local_expr,
+ value: expr,
+ },
+ meta,
+ );
+
+ VariableReference {
+ expr: local_expr,
+ load: true,
+ mutable,
+ constant: None,
+ entry_arg: None,
+ }
+ } else {
+ VariableReference {
+ expr,
+ load,
+ mutable,
+ constant: None,
+ entry_arg: None,
+ }
+ };
+
+ self.symbol_table.add(name, var);
+ }
+ }
+
+ /// Returns a [`StmtContext`](StmtContext) to be used in parsing and lowering
+ ///
+ /// # Panics
+ /// - If more than one [`StmtContext`](StmtContext) is active at the same
+ /// time or if the previous call didn't use it in lowering.
+ #[must_use]
+ pub fn stmt_ctx(&mut self) -> StmtContext {
+ self.stmt_ctx.take().unwrap()
+ }
+
+ /// Lowers a [`HirExpr`](HirExpr) which might produce an [`Expression`](Expression).
+ ///
+ /// Consumes a [`StmtContext`](StmtContext), returning it to the context so
+ /// that it can be used again later.
+ pub fn lower(
+ &mut self,
+ mut stmt: StmtContext,
+ frontend: &mut Frontend,
+ expr: Handle<HirExpr>,
+ pos: ExprPos,
+ body: &mut Block,
+ ) -> Result<(Option<Handle<Expression>>, Span)> {
+ let res = self.lower_inner(&stmt, frontend, expr, pos, body);
+
+ stmt.hir_exprs.clear();
+ self.stmt_ctx = Some(stmt);
+
+ res
+ }
+
+ /// Similar to [`lower`](Self::lower) but returns an error if the expression
+ /// returns void (i.e. doesn't produce an [`Expression`](Expression)).
+ ///
+ /// Consumes a [`StmtContext`](StmtContext), returning it to the context so
+ /// that it can be used again later.
+ pub fn lower_expect(
+ &mut self,
+ mut stmt: StmtContext,
+ frontend: &mut Frontend,
+ expr: Handle<HirExpr>,
+ pos: ExprPos,
+ body: &mut Block,
+ ) -> Result<(Handle<Expression>, Span)> {
+ let res = self.lower_expect_inner(&stmt, frontend, expr, pos, body);
+
+ stmt.hir_exprs.clear();
+ self.stmt_ctx = Some(stmt);
+
+ res
+ }
+
+ /// Internal implementation of [`lower_expect`](Self::lower_expect)
+ ///
+ /// This method is only public because it's used in
+ /// [`function_call`](Frontend::function_call); unless you know what
+ /// you're doing, use [`lower_expect`](Self::lower_expect) instead
+ pub fn lower_expect_inner(
+ &mut self,
+ stmt: &StmtContext,
+ frontend: &mut Frontend,
+ expr: Handle<HirExpr>,
+ pos: ExprPos,
+ body: &mut Block,
+ ) -> Result<(Handle<Expression>, Span)> {
+ let (maybe_expr, meta) = self.lower_inner(stmt, frontend, expr, pos, body)?;
+
+ let expr = match maybe_expr {
+ Some(e) => e,
+ None => {
+ return Err(Error {
+ kind: ErrorKind::SemanticError("Expression returns void".into()),
+ meta,
+ })
+ }
+ };
+
+ Ok((expr, meta))
+ }
+
+ fn lower_store(
+ &mut self,
+ pointer: Handle<Expression>,
+ value: Handle<Expression>,
+ meta: Span,
+ body: &mut Block,
+ ) {
+ if let Expression::Swizzle {
+ size,
+ mut vector,
+ pattern,
+ } = self.expressions[pointer]
+ {
+ // Stores to swizzled values are not directly supported,
+ // lower them as series of per-component stores.
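+ //
+ // For example, a GLSL assignment like `v.zx = e;` is lowered
+ // roughly as `v.z = e.x; v.x = e.y;` (one store per component).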
+ let size = match size {
+ VectorSize::Bi => 2,
+ VectorSize::Tri => 3,
+ VectorSize::Quad => 4,
+ };
+
+ if let Expression::Load { pointer } = self.expressions[vector] {
+ vector = pointer;
+ }
+
+ #[allow(clippy::needless_range_loop)]
+ for index in 0..size {
+ let dst = self.add_expression(
+ Expression::AccessIndex {
+ base: vector,
+ index: pattern[index].index(),
+ },
+ meta,
+ body,
+ );
+ let src = self.add_expression(
+ Expression::AccessIndex {
+ base: value,
+ index: index as u32,
+ },
+ meta,
+ body,
+ );
+
+ self.emit_restart(body);
+
+ body.push(
+ Statement::Store {
+ pointer: dst,
+ value: src,
+ },
+ meta,
+ );
+ }
+ } else {
+ self.emit_restart(body);
+
+ body.push(Statement::Store { pointer, value }, meta);
+ }
+ }
+
+ /// Internal implementation of [`lower`](Self::lower)
+ fn lower_inner(
+ &mut self,
+ stmt: &StmtContext,
+ frontend: &mut Frontend,
+ expr: Handle<HirExpr>,
+ pos: ExprPos,
+ body: &mut Block,
+ ) -> Result<(Option<Handle<Expression>>, Span)> {
+ let HirExpr { ref kind, meta } = stmt.hir_exprs[expr];
+
+ log::debug!("Lowering {:?} (kind {:?}, pos {:?})", expr, kind, pos);
+
+ let handle = match *kind {
+ HirExprKind::Access { base, index } => {
+ let (index, index_meta) =
+ self.lower_expect_inner(stmt, frontend, index, ExprPos::Rhs, body)?;
+ let maybe_constant_index = match pos {
+ // Don't try to generate `AccessIndex` if in a LHS position, since it
+ // wouldn't produce a pointer.
+ ExprPos::Lhs => None,
+ _ => frontend.solve_constant(self, index, index_meta).ok(),
+ };
+
+ let base = self
+ .lower_expect_inner(
+ stmt,
+ frontend,
+ base,
+ pos.maybe_access_base(maybe_constant_index.is_some()),
+ body,
+ )?
+ .0;
+
+ let pointer = maybe_constant_index
+ .and_then(|constant| {
+ Some(self.add_expression(
+ Expression::AccessIndex {
+ base,
+ index: match frontend.module.constants[constant].inner {
+ crate::ConstantInner::Scalar {
+ value: ScalarValue::Uint(i),
+ ..
+ } => u32::try_from(i).ok()?,
+ crate::ConstantInner::Scalar {
+ value: ScalarValue::Sint(i),
+ ..
+ } => u32::try_from(i).ok()?,
+ _ => return None,
+ },
+ },
+ meta,
+ body,
+ ))
+ })
+ .unwrap_or_else(|| {
+ self.add_expression(Expression::Access { base, index }, meta, body)
+ });
+
+ if ExprPos::Rhs == pos {
+ let resolved = frontend.resolve_type(self, pointer, meta)?;
+ if resolved.pointer_space().is_some() {
+ return Ok((
+ Some(self.add_expression(Expression::Load { pointer }, meta, body)),
+ meta,
+ ));
+ }
+ }
+
+ pointer
+ }
+ HirExprKind::Select { base, ref field } => {
+ let base = self.lower_expect_inner(stmt, frontend, base, pos, body)?.0;
+
+ frontend.field_selection(self, pos, body, base, field, meta)?
+ }
+ HirExprKind::Constant(constant) if pos != ExprPos::Lhs => {
+ self.add_expression(Expression::Constant(constant), meta, body)
+ }
+ HirExprKind::Binary { left, op, right } if pos != ExprPos::Lhs => {
+ let (mut left, left_meta) =
+ self.lower_expect_inner(stmt, frontend, left, ExprPos::Rhs, body)?;
+ let (mut right, right_meta) =
+ self.lower_expect_inner(stmt, frontend, right, ExprPos::Rhs, body)?;
+
+ match op {
+ BinaryOperator::ShiftLeft | BinaryOperator::ShiftRight => self
+ .implicit_conversion(
+ frontend,
+ &mut right,
+ right_meta,
+ ScalarKind::Uint,
+ 4,
+ )?,
+ _ => self.binary_implicit_conversion(
+ frontend, &mut left, left_meta, &mut right, right_meta,
+ )?,
+ }
+
+ frontend.typifier_grow(self, left, left_meta)?;
+ frontend.typifier_grow(self, right, right_meta)?;
+
+ let left_inner = self.typifier.get(left, &frontend.module.types);
+ let right_inner = self.typifier.get(right, &frontend.module.types);
+
+ match (left_inner, right_inner) {
+ (
+ &TypeInner::Matrix {
+ columns: left_columns,
+ rows: left_rows,
+ width: left_width,
+ },
+ &TypeInner::Matrix {
+ columns: right_columns,
+ rows: right_rows,
+ width: right_width,
+ },
+ ) => {
+ let dimensions_ok = if op == BinaryOperator::Multiply {
+ left_columns == right_rows
+ } else {
+ left_columns == right_columns && left_rows == right_rows
+ };
+
+ // Check that the two arguments have compatible dimensions
+ if !dimensions_ok || left_width != right_width {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ format!(
+ "Cannot apply operation to {left_inner:?} and {right_inner:?}"
+ )
+ .into(),
+ ),
+ meta,
+ })
+ }
+
+ match op {
+ BinaryOperator::Divide => {
+ // Naga IR doesn't support matrix division so we need to
+ // divide the columns individually and reassemble the matrix
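+ //
+ // e.g. for two-column matrices, `a / b` is rebuilt (in GLSL terms)
+ // as matN(a[0] / b[0], a[1] / b[1])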
+ let mut components = Vec::with_capacity(left_columns as usize);
+
+ for index in 0..left_columns as u32 {
+ // Get the column vectors
+ let left_vector = self.add_expression(
+ Expression::AccessIndex { base: left, index },
+ meta,
+ body,
+ );
+ let right_vector = self.add_expression(
+ Expression::AccessIndex { base: right, index },
+ meta,
+ body,
+ );
+
+ // Divide the vectors
+ let column = self.expressions.append(
+ Expression::Binary {
+ op,
+ left: left_vector,
+ right: right_vector,
+ },
+ meta,
+ );
+
+ components.push(column)
+ }
+
+ // Rebuild the matrix from the divided vectors
+ self.expressions.append(
+ Expression::Compose {
+ ty: frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Matrix {
+ columns: left_columns,
+ rows: left_rows,
+ width: left_width,
+ },
+ },
+ Span::default(),
+ ),
+ components,
+ },
+ meta,
+ )
+ }
+ BinaryOperator::Equal | BinaryOperator::NotEqual => {
+ // Naga IR doesn't support matrix comparisons so we need to
+ // compare the columns individually and then fold them together
+ //
+ // The folding is done using a logical and for equality and
+ // a logical or for inequality
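+                            // For example, `m1 == m2` on `mat2` values lowers to roughly
+                            // `all(m1[0] == m2[0]) && all(m1[1] == m2[1])`.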
+ let equals = op == BinaryOperator::Equal;
+
+ let (op, combine, fun) = match equals {
+ true => (
+ BinaryOperator::Equal,
+ BinaryOperator::LogicalAnd,
+ RelationalFunction::All,
+ ),
+ false => (
+ BinaryOperator::NotEqual,
+ BinaryOperator::LogicalOr,
+ RelationalFunction::Any,
+ ),
+ };
+
+ let mut root = None;
+
+ for index in 0..left_columns as u32 {
+ // Get the column vectors
+ let left_vector = self.add_expression(
+ Expression::AccessIndex { base: left, index },
+ meta,
+ body,
+ );
+ let right_vector = self.add_expression(
+ Expression::AccessIndex { base: right, index },
+ meta,
+ body,
+ );
+
+ let argument = self.expressions.append(
+ Expression::Binary {
+ op,
+ left: left_vector,
+ right: right_vector,
+ },
+ meta,
+ );
+
+ // The result of comparing two vectors is a boolean vector
+ // so use a relational function like all to get a single
+ // boolean value
+ let compare = self.add_expression(
+ Expression::Relational { fun, argument },
+ meta,
+ body,
+ );
+
+ // Fold the result
+ root = Some(match root {
+ Some(right) => self.add_expression(
+ Expression::Binary {
+ op: combine,
+ left: compare,
+ right,
+ },
+ meta,
+ body,
+ ),
+ None => compare,
+ });
+ }
+
+ root.unwrap()
+ }
+ _ => self.add_expression(
+ Expression::Binary { left, op, right },
+ meta,
+ body,
+ ),
+ }
+ }
+ (&TypeInner::Vector { .. }, &TypeInner::Vector { .. }) => match op {
+ BinaryOperator::Equal | BinaryOperator::NotEqual => {
+ let equals = op == BinaryOperator::Equal;
+
+ let (op, fun) = match equals {
+ true => (BinaryOperator::Equal, RelationalFunction::All),
+ false => (BinaryOperator::NotEqual, RelationalFunction::Any),
+ };
+
+ let argument = self
+ .expressions
+ .append(Expression::Binary { op, left, right }, meta);
+
+ self.add_expression(
+ Expression::Relational { fun, argument },
+ meta,
+ body,
+ )
+ }
+ _ => {
+ self.add_expression(Expression::Binary { left, op, right }, meta, body)
+ }
+ },
+ (&TypeInner::Vector { size, .. }, &TypeInner::Scalar { .. }) => match op {
+ BinaryOperator::Add
+ | BinaryOperator::Subtract
+ | BinaryOperator::Divide
+ | BinaryOperator::And
+ | BinaryOperator::ExclusiveOr
+ | BinaryOperator::InclusiveOr
+ | BinaryOperator::ShiftLeft
+ | BinaryOperator::ShiftRight => {
+ let scalar_vector = self.add_expression(
+ Expression::Splat { size, value: right },
+ meta,
+ body,
+ );
+
+ self.add_expression(
+ Expression::Binary {
+ op,
+ left,
+ right: scalar_vector,
+ },
+ meta,
+ body,
+ )
+ }
+ _ => {
+ self.add_expression(Expression::Binary { left, op, right }, meta, body)
+ }
+ },
+ (&TypeInner::Scalar { .. }, &TypeInner::Vector { size, .. }) => match op {
+ BinaryOperator::Add
+ | BinaryOperator::Subtract
+ | BinaryOperator::Divide
+ | BinaryOperator::And
+ | BinaryOperator::ExclusiveOr
+ | BinaryOperator::InclusiveOr => {
+ let scalar_vector = self.add_expression(
+ Expression::Splat { size, value: left },
+ meta,
+ body,
+ );
+
+ self.add_expression(
+ Expression::Binary {
+ op,
+ left: scalar_vector,
+ right,
+ },
+ meta,
+ body,
+ )
+ }
+ _ => {
+ self.add_expression(Expression::Binary { left, op, right }, meta, body)
+ }
+ },
+ (
+ &TypeInner::Scalar {
+ width: left_width, ..
+ },
+ &TypeInner::Matrix {
+ rows,
+ columns,
+ width: right_width,
+ },
+ ) => {
+ // Check that the two arguments have the same width
+ if left_width != right_width {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ format!(
+ "Cannot apply operation to {left_inner:?} and {right_inner:?}"
+ )
+ .into(),
+ ),
+ meta,
+ })
+ }
+
+ match op {
+ BinaryOperator::Divide
+ | BinaryOperator::Add
+ | BinaryOperator::Subtract => {
+                                    // Naga IR doesn't support all matrix by scalar operations,
+                                    // so for some of them we turn the scalar into a vector by
+                                    // splatting it, apply the operation to each column vector,
+                                    // and finally reconstruct the matrix
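+                                    // For example, `2.0 + m` on a `mat2` lowers to roughly
+                                    // `mat2(vec2(2.0) + m[0], vec2(2.0) + m[1])`.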
+ let scalar_vector = self.add_expression(
+ Expression::Splat {
+ size: rows,
+ value: left,
+ },
+ meta,
+ body,
+ );
+
+ let mut components = Vec::with_capacity(columns as usize);
+
+ for index in 0..columns as u32 {
+ // Get the column vector
+ let matrix_column = self.add_expression(
+ Expression::AccessIndex { base: right, index },
+ meta,
+ body,
+ );
+
+ // Apply the operation to the splatted vector and
+ // the column vector
+ let column = self.expressions.append(
+ Expression::Binary {
+ op,
+ left: scalar_vector,
+ right: matrix_column,
+ },
+ meta,
+ );
+
+ components.push(column)
+ }
+
+ // Rebuild the matrix from the operation result vectors
+ self.expressions.append(
+ Expression::Compose {
+ ty: frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Matrix {
+ columns,
+ rows,
+ width: left_width,
+ },
+ },
+ Span::default(),
+ ),
+ components,
+ },
+ meta,
+ )
+ }
+ _ => self.add_expression(
+ Expression::Binary { left, op, right },
+ meta,
+ body,
+ ),
+ }
+ }
+ (
+ &TypeInner::Matrix {
+ rows,
+ columns,
+ width: left_width,
+ },
+ &TypeInner::Scalar {
+ width: right_width, ..
+ },
+ ) => {
+ // Check that the two arguments have the same width
+ if left_width != right_width {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ format!(
+ "Cannot apply operation to {left_inner:?} and {right_inner:?}"
+ )
+ .into(),
+ ),
+ meta,
+ })
+ }
+
+ match op {
+ BinaryOperator::Divide
+ | BinaryOperator::Add
+ | BinaryOperator::Subtract => {
+                                    // Naga IR doesn't support all matrix by scalar operations,
+                                    // so for some of them we turn the scalar into a vector by
+                                    // splatting it, apply the operation to each column vector,
+                                    // and finally reconstruct the matrix
+
+ let scalar_vector = self.add_expression(
+ Expression::Splat {
+ size: rows,
+ value: right,
+ },
+ meta,
+ body,
+ );
+
+ let mut components = Vec::with_capacity(columns as usize);
+
+ for index in 0..columns as u32 {
+ // Get the column vector
+ let matrix_column = self.add_expression(
+ Expression::AccessIndex { base: left, index },
+ meta,
+ body,
+ );
+
+ // Apply the operation to the splatted vector and
+ // the column vector
+ let column = self.expressions.append(
+ Expression::Binary {
+ op,
+ left: matrix_column,
+ right: scalar_vector,
+ },
+ meta,
+ );
+
+ components.push(column)
+ }
+
+ // Rebuild the matrix from the operation result vectors
+ self.expressions.append(
+ Expression::Compose {
+ ty: frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Matrix {
+ columns,
+ rows,
+ width: left_width,
+ },
+ },
+ Span::default(),
+ ),
+ components,
+ },
+ meta,
+ )
+ }
+ _ => self.add_expression(
+ Expression::Binary { left, op, right },
+ meta,
+ body,
+ ),
+ }
+ }
+ _ => self.add_expression(Expression::Binary { left, op, right }, meta, body),
+ }
+ }
+ HirExprKind::Unary { op, expr } if pos != ExprPos::Lhs => {
+ let expr = self
+ .lower_expect_inner(stmt, frontend, expr, ExprPos::Rhs, body)?
+ .0;
+
+ self.add_expression(Expression::Unary { op, expr }, meta, body)
+ }
+ HirExprKind::Variable(ref var) => match pos {
+ ExprPos::Lhs => {
+ if !var.mutable {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Variable cannot be used in LHS position".into(),
+ ),
+ meta,
+ })
+ }
+
+ var.expr
+ }
+ ExprPos::AccessBase { constant_index } => {
+                        // If the index isn't constant, all accesses backed by a constant base
+                        // need to go through a proxy local variable, since constants have a
+                        // non-pointer type, and dynamic indexing requires a pointer
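+                        // For example, indexing a constant array with a dynamic index first
+                        // spills the constant into a local variable, whose pointer type
+                        // `Access` can then index dynamically.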
+ if !constant_index {
+ if let Some((constant, ty)) = var.constant {
+ let local = self.locals.append(
+ LocalVariable {
+ name: None,
+ ty,
+ init: Some(constant),
+ },
+ Span::default(),
+ );
+
+ self.add_expression(
+ Expression::LocalVariable(local),
+ Span::default(),
+ body,
+ )
+ } else {
+ var.expr
+ }
+ } else {
+ var.expr
+ }
+ }
+ _ if var.load => {
+ self.add_expression(Expression::Load { pointer: var.expr }, meta, body)
+ }
+ ExprPos::Rhs => var.expr,
+ },
+ HirExprKind::Call(ref call) if pos != ExprPos::Lhs => {
+ let maybe_expr = frontend.function_or_constructor_call(
+ self,
+ stmt,
+ body,
+ call.kind.clone(),
+ &call.args,
+ meta,
+ )?;
+ return Ok((maybe_expr, meta));
+ }
+            // `HirExprKind::Conditional` represents the ternary operator in glsl (`?:`)
+            //
+            // The ternary operator is defined to only evaluate one of the two possible
+            // expressions, which means that its behavior is that of an `if` statement,
+            // and it's merely syntactic sugar for it.
+ HirExprKind::Conditional {
+ condition,
+ accept,
+ reject,
+ } if ExprPos::Lhs != pos => {
+ // Given an expression `a ? b : c`, we need to produce a Naga
+ // statement roughly like:
+ //
+ // var temp;
+ // if a {
+ // temp = convert(b);
+ // } else {
+ // temp = convert(c);
+ // }
+ //
+ // where `convert` stands for type conversions to bring `b` and `c` to
+ // the same type, and then use `temp` to represent the value of the whole
+ // conditional expression in subsequent code.
+
+                // Lower the condition first to the current body
+ let condition = self
+ .lower_expect_inner(stmt, frontend, condition, ExprPos::Rhs, body)?
+ .0;
+
+ // Emit all expressions since we will be adding statements to
+ // other bodies next
+ self.emit_restart(body);
+
+ // Create the bodies for the two cases
+ let mut accept_body = Block::new();
+ let mut reject_body = Block::new();
+
+ // Lower the `true` branch
+ let (mut accept, accept_meta) =
+ self.lower_expect_inner(stmt, frontend, accept, pos, &mut accept_body)?;
+
+ // Flush the body of the `true` branch, to start emitting on the
+ // `false` branch
+ self.emit_restart(&mut accept_body);
+
+ // Lower the `false` branch
+ let (mut reject, reject_meta) =
+ self.lower_expect_inner(stmt, frontend, reject, pos, &mut reject_body)?;
+
+ // Flush the body of the `false` branch
+ self.emit_restart(&mut reject_body);
+
+ // We need to do some custom implicit conversions since the two target expressions
+ // are in different bodies
+ if let (
+ Some((accept_power, accept_width, accept_kind)),
+ Some((reject_power, reject_width, reject_kind)),
+ ) = (
+ // Get the components of both branches and calculate the type power
+ self.expr_scalar_components(frontend, accept, accept_meta)?
+ .and_then(|(kind, width)| Some((type_power(kind, width)?, width, kind))),
+ self.expr_scalar_components(frontend, reject, reject_meta)?
+ .and_then(|(kind, width)| Some((type_power(kind, width)?, width, kind))),
+ ) {
+ match accept_power.cmp(&reject_power) {
+ std::cmp::Ordering::Less => {
+ self.conversion(&mut accept, accept_meta, reject_kind, reject_width)?;
+ // The expression belongs to the `true` branch so we need to flush to
+ // the respective body
+ self.emit_end(&mut accept_body);
+ }
+                        // Technically there's nothing to flush, but later we will need to
+                        // add some expressions that must not be emitted, so instead
+                        // of flushing, starting and flushing again, just make sure
+                        // everything is flushed.
+ std::cmp::Ordering::Equal => self.emit_end(body),
+ std::cmp::Ordering::Greater => {
+ self.conversion(&mut reject, reject_meta, accept_kind, accept_width)?;
+ // The expression belongs to the `false` branch so we need to flush to
+ // the respective body
+ self.emit_end(&mut reject_body);
+ }
+ }
+ } else {
+                    // Technically there's nothing to flush, but later we will need to
+                    // add some expressions that must not be emitted.
+ self.emit_end(body)
+ }
+
+                // We need to get the type of the resulting expression to create the local;
+                // this must be done after implicit conversions to ensure both branches have
+                // the same type.
+ let ty = frontend.resolve_type_handle(self, accept, accept_meta)?;
+
+ // Add the local that will hold the result of our conditional
+ let local = self.locals.append(
+ LocalVariable {
+ name: None,
+ ty,
+ init: None,
+ },
+ meta,
+ );
+
+                // Note: `Expression::LocalVariable` must not be emitted so it's important
+ // that at this point the emitter is flushed but not started.
+ let local_expr = self
+ .expressions
+ .append(Expression::LocalVariable(local), meta);
+
+ // Add to each body the store to the result variable
+ accept_body.push(
+ Statement::Store {
+ pointer: local_expr,
+ value: accept,
+ },
+ accept_meta,
+ );
+ reject_body.push(
+ Statement::Store {
+ pointer: local_expr,
+ value: reject,
+ },
+ reject_meta,
+ );
+
+ // Finally add the `If` to the main body with the `condition` we lowered
+ // earlier and the branches we prepared.
+ body.push(
+ Statement::If {
+ condition,
+ accept: accept_body,
+ reject: reject_body,
+ },
+ meta,
+ );
+
+ // Restart the emitter
+ self.emit_start();
+
+                // Note: `Expression::Load` must be emitted before it's used so make
+ // sure the emitter is active here.
+ self.expressions.append(
+ Expression::Load {
+ pointer: local_expr,
+ },
+ meta,
+ )
+ }
+ HirExprKind::Assign { tgt, value } if ExprPos::Lhs != pos => {
+ let (pointer, ptr_meta) =
+ self.lower_expect_inner(stmt, frontend, tgt, ExprPos::Lhs, body)?;
+ let (mut value, value_meta) =
+ self.lower_expect_inner(stmt, frontend, value, ExprPos::Rhs, body)?;
+
+ let ty = match *frontend.resolve_type(self, pointer, ptr_meta)? {
+ TypeInner::Pointer { base, .. } => &frontend.module.types[base].inner,
+ ref ty => ty,
+ };
+
+ if let Some((kind, width)) = scalar_components(ty) {
+ self.implicit_conversion(frontend, &mut value, value_meta, kind, width)?;
+ }
+
+ self.lower_store(pointer, value, meta, body);
+
+ value
+ }
+ HirExprKind::PrePostfix { op, postfix, expr } if ExprPos::Lhs != pos => {
+ let (pointer, _) =
+ self.lower_expect_inner(stmt, frontend, expr, ExprPos::Lhs, body)?;
+ let left = if let Expression::Swizzle { .. } = self.expressions[pointer] {
+ pointer
+ } else {
+ self.add_expression(Expression::Load { pointer }, meta, body)
+ };
+
+ let make_constant_inner = |kind, width| {
+ let value = match kind {
+ ScalarKind::Sint => crate::ScalarValue::Sint(1),
+ ScalarKind::Uint => crate::ScalarValue::Uint(1),
+ ScalarKind::Float => crate::ScalarValue::Float(1.0),
+ ScalarKind::Bool => return None,
+ };
+
+ Some(crate::ConstantInner::Scalar { width, value })
+ };
+ let res = match *frontend.resolve_type(self, left, meta)? {
+ TypeInner::Scalar { kind, width } => {
+ let ty = TypeInner::Scalar { kind, width };
+ make_constant_inner(kind, width).map(|i| (ty, i, None, None))
+ }
+ TypeInner::Vector { size, kind, width } => {
+ let ty = TypeInner::Vector { size, kind, width };
+ make_constant_inner(kind, width).map(|i| (ty, i, Some(size), None))
+ }
+ TypeInner::Matrix {
+ columns,
+ rows,
+ width,
+ } => {
+ let ty = TypeInner::Matrix {
+ columns,
+ rows,
+ width,
+ };
+ make_constant_inner(ScalarKind::Float, width)
+ .map(|i| (ty, i, Some(rows), Some(columns)))
+ }
+ _ => None,
+ };
+ let (ty_inner, inner, rows, columns) = match res {
+ Some(res) => res,
+ None => {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Increment/decrement only works on scalar/vector/matrix".into(),
+ ),
+ meta,
+ });
+ return Ok((Some(left), meta));
+ }
+ };
+
+ let constant_1 = frontend.module.constants.append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner,
+ },
+ Default::default(),
+ );
+ let mut right = self.add_expression(Expression::Constant(constant_1), meta, body);
+
+                // Glsl allows pre/postfix operations on vectors and matrices, so if the
+                // target is either of them, splat the right side of the addition to the
+                // same size as the target; furthermore, if the target is a matrix,
+                // compose a matrix from the splatted value.
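+                // For example, `m++` on a `mat2` effectively computes
+                // `m + mat2(vec2(1.0), vec2(1.0))`, stores the result back into `m`,
+                // and yields the old value of `m` since the operation is a postfix.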
+ if let Some(size) = rows {
+ right =
+ self.add_expression(Expression::Splat { size, value: right }, meta, body);
+
+ if let Some(cols) = columns {
+ let ty = frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: ty_inner,
+ },
+ meta,
+ );
+
+ right = self.add_expression(
+ Expression::Compose {
+ ty,
+ components: std::iter::repeat(right).take(cols as usize).collect(),
+ },
+ meta,
+ body,
+ );
+ }
+ }
+
+ let value = self.add_expression(Expression::Binary { op, left, right }, meta, body);
+
+ self.lower_store(pointer, value, meta, body);
+
+ if postfix {
+ left
+ } else {
+ value
+ }
+ }
+ HirExprKind::Method {
+ expr: object,
+ ref name,
+ ref args,
+ } if ExprPos::Lhs != pos => {
+ let args = args
+ .iter()
+ .map(|e| self.lower_expect_inner(stmt, frontend, *e, ExprPos::Rhs, body))
+ .collect::<Result<Vec<_>>>()?;
+ match name.as_ref() {
+ "length" => {
+ if !args.is_empty() {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ ".length() doesn't take any arguments".into(),
+ ),
+ meta,
+ });
+ }
+ let lowered_array = self
+ .lower_expect_inner(stmt, frontend, object, pos, body)?
+ .0;
+ let array_type = frontend.resolve_type(self, lowered_array, meta)?;
+
+ match *array_type {
+ TypeInner::Array {
+ size: crate::ArraySize::Constant(size),
+ ..
+ } => {
+ let mut array_length =
+ self.add_expression(Expression::Constant(size), meta, body);
+ self.forced_conversion(
+ frontend,
+ &mut array_length,
+ meta,
+ ScalarKind::Sint,
+ 4,
+ )?;
+ array_length
+ }
+ // let the error be handled in type checking if it's not a dynamic array
+ _ => {
+ let mut array_length = self.add_expression(
+ Expression::ArrayLength(lowered_array),
+ meta,
+ body,
+ );
+ self.conversion(&mut array_length, meta, ScalarKind::Sint, 4)?;
+ array_length
+ }
+ }
+ }
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::SemanticError(
+ format!("unknown method '{name}'").into(),
+ ),
+ meta,
+ });
+ }
+ }
+ }
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::SemanticError(
+ format!("{:?} cannot be in the left hand side", stmt.hir_exprs[expr])
+ .into(),
+ ),
+ meta,
+ })
+ }
+ };
+
+ log::trace!(
+ "Lowered {:?}\n\tKind = {:?}\n\tPos = {:?}\n\tResult = {:?}",
+ expr,
+ kind,
+ pos,
+ handle
+ );
+
+ Ok((Some(handle), meta))
+ }
+
+ pub fn expr_scalar_components(
+ &mut self,
+ frontend: &Frontend,
+ expr: Handle<Expression>,
+ meta: Span,
+ ) -> Result<Option<(ScalarKind, crate::Bytes)>> {
+ let ty = frontend.resolve_type(self, expr, meta)?;
+ Ok(scalar_components(ty))
+ }
+
+ pub fn expr_power(
+ &mut self,
+ frontend: &Frontend,
+ expr: Handle<Expression>,
+ meta: Span,
+ ) -> Result<Option<u32>> {
+ Ok(self
+ .expr_scalar_components(frontend, expr, meta)?
+ .and_then(|(kind, width)| type_power(kind, width)))
+ }
+
+ pub fn conversion(
+ &mut self,
+ expr: &mut Handle<Expression>,
+ meta: Span,
+ kind: ScalarKind,
+ width: crate::Bytes,
+ ) -> Result<()> {
+ *expr = self.expressions.append(
+ Expression::As {
+ expr: *expr,
+ kind,
+ convert: Some(width),
+ },
+ meta,
+ );
+
+ Ok(())
+ }
+
+ pub fn implicit_conversion(
+ &mut self,
+ frontend: &Frontend,
+ expr: &mut Handle<Expression>,
+ meta: Span,
+ kind: ScalarKind,
+ width: crate::Bytes,
+ ) -> Result<()> {
+ if let (Some(tgt_power), Some(expr_power)) = (
+ type_power(kind, width),
+ self.expr_power(frontend, *expr, meta)?,
+ ) {
+ if tgt_power > expr_power {
+ self.conversion(expr, meta, kind, width)?;
+ }
+ }
+
+ Ok(())
+ }
+
+ pub fn forced_conversion(
+ &mut self,
+ frontend: &Frontend,
+ expr: &mut Handle<Expression>,
+ meta: Span,
+ kind: ScalarKind,
+ width: crate::Bytes,
+ ) -> Result<()> {
+ if let Some((expr_scalar_kind, expr_width)) =
+ self.expr_scalar_components(frontend, *expr, meta)?
+ {
+ if expr_scalar_kind != kind || expr_width != width {
+ self.conversion(expr, meta, kind, width)?;
+ }
+ }
+
+ Ok(())
+ }
+
+ pub fn binary_implicit_conversion(
+ &mut self,
+ frontend: &Frontend,
+ left: &mut Handle<Expression>,
+ left_meta: Span,
+ right: &mut Handle<Expression>,
+ right_meta: Span,
+ ) -> Result<()> {
+ let left_components = self.expr_scalar_components(frontend, *left, left_meta)?;
+ let right_components = self.expr_scalar_components(frontend, *right, right_meta)?;
+
+ if let (
+ Some((left_power, left_width, left_kind)),
+ Some((right_power, right_width, right_kind)),
+ ) = (
+ left_components.and_then(|(kind, width)| Some((type_power(kind, width)?, width, kind))),
+ right_components
+ .and_then(|(kind, width)| Some((type_power(kind, width)?, width, kind))),
+ ) {
+ match left_power.cmp(&right_power) {
+ std::cmp::Ordering::Less => {
+ self.conversion(left, left_meta, right_kind, right_width)?;
+ }
+ std::cmp::Ordering::Equal => {}
+ std::cmp::Ordering::Greater => {
+ self.conversion(right, right_meta, left_kind, left_width)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ pub fn implicit_splat(
+ &mut self,
+ frontend: &Frontend,
+ expr: &mut Handle<Expression>,
+ meta: Span,
+ vector_size: Option<VectorSize>,
+ ) -> Result<()> {
+ let expr_type = frontend.resolve_type(self, *expr, meta)?;
+
+ if let (&TypeInner::Scalar { .. }, Some(size)) = (expr_type, vector_size) {
+ *expr = self
+ .expressions
+ .append(Expression::Splat { size, value: *expr }, meta)
+ }
+
+ Ok(())
+ }
+
+ pub fn vector_resize(
+ &mut self,
+ size: VectorSize,
+ vector: Handle<Expression>,
+ meta: Span,
+ body: &mut Block,
+ ) -> Handle<Expression> {
+ self.add_expression(
+ Expression::Swizzle {
+ size,
+ vector,
+ pattern: crate::SwizzleComponent::XYZW,
+ },
+ meta,
+ body,
+ )
+ }
+}
+
+impl Index<Handle<Expression>> for Context {
+ type Output = Expression;
+
+ fn index(&self, index: Handle<Expression>) -> &Self::Output {
+ &self.expressions[index]
+ }
+}
+
+/// Helper struct passed when parsing expressions
+///
+/// This struct should only be obtained through [`stmt_ctx`](Context::stmt_ctx)
+/// and only one of these may be active at any time per context.
+#[derive(Debug)]
+pub struct StmtContext {
+    /// An arena of high level expressions which can be lowered through a
+ /// [`Context`](Context) to naga's [`Expression`](crate::Expression)s
+ pub hir_exprs: Arena<HirExpr>,
+}
+
+impl StmtContext {
+ const fn new() -> Self {
+ StmtContext {
+ hir_exprs: Arena::new(),
+ }
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/error.rs b/third_party/rust/naga/src/front/glsl/error.rs
new file mode 100644
index 0000000000..46b3aecd08
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/error.rs
@@ -0,0 +1,134 @@
+use super::{constants::ConstantSolvingError, token::TokenValue};
+use crate::Span;
+use pp_rs::token::PreprocessorError;
+use std::borrow::Cow;
+use thiserror::Error;
+
+fn join_with_comma(list: &[ExpectedToken]) -> String {
+ let mut string = "".to_string();
+ for (i, val) in list.iter().enumerate() {
+ string.push_str(&val.to_string());
+ match i {
+ i if i == list.len() - 1 => {}
+ i if i == list.len() - 2 => string.push_str(" or "),
+ _ => string.push_str(", "),
+ }
+ }
+ string
+}
+
+/// One of the expected tokens returned in [`InvalidToken`](ErrorKind::InvalidToken).
+#[derive(Debug, PartialEq)]
+pub enum ExpectedToken {
+ /// A specific token was expected.
+ Token(TokenValue),
+ /// A type was expected.
+ TypeName,
+ /// An identifier was expected.
+ Identifier,
+ /// An integer literal was expected.
+ IntLiteral,
+ /// A float literal was expected.
+ FloatLiteral,
+ /// A boolean literal was expected.
+ BoolLiteral,
+ /// The end of file was expected.
+ Eof,
+}
+impl From<TokenValue> for ExpectedToken {
+ fn from(token: TokenValue) -> Self {
+ ExpectedToken::Token(token)
+ }
+}
+impl std::fmt::Display for ExpectedToken {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match *self {
+ ExpectedToken::Token(ref token) => write!(f, "{token:?}"),
+ ExpectedToken::TypeName => write!(f, "a type"),
+ ExpectedToken::Identifier => write!(f, "identifier"),
+ ExpectedToken::IntLiteral => write!(f, "integer literal"),
+ ExpectedToken::FloatLiteral => write!(f, "float literal"),
+ ExpectedToken::BoolLiteral => write!(f, "bool literal"),
+ ExpectedToken::Eof => write!(f, "end of file"),
+ }
+ }
+}
+
+/// Information about the cause of an error.
+#[derive(Debug, Error)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum ErrorKind {
+    /// Whilst parsing, an unexpected EOF was encountered.
+ #[error("Unexpected end of file")]
+ EndOfFile,
+ /// The shader specified an unsupported or invalid profile.
+ #[error("Invalid profile: {0}")]
+ InvalidProfile(String),
+ /// The shader requested an unsupported or invalid version.
+ #[error("Invalid version: {0}")]
+ InvalidVersion(u64),
+    /// Whilst parsing, an unexpected token was encountered.
+ ///
+ /// A list of expected tokens is also returned.
+ #[error("Expected {}, found {0:?}", join_with_comma(.1))]
+ InvalidToken(TokenValue, Vec<ExpectedToken>),
+ /// A specific feature is not yet implemented.
+ ///
+    /// To help prioritize work, please open an issue in the GitHub issue tracker
+    /// if none exists already, or react to the existing one.
+ #[error("Not implemented: {0}")]
+ NotImplemented(&'static str),
+ /// A reference to a variable that wasn't declared was used.
+ #[error("Unknown variable: {0}")]
+ UnknownVariable(String),
+ /// A reference to a type that wasn't declared was used.
+ #[error("Unknown type: {0}")]
+ UnknownType(String),
+    /// A reference to a non-existent member of a type was made.
+ #[error("Unknown field: {0}")]
+ UnknownField(String),
+ /// An unknown layout qualifier was used.
+ ///
+    /// If the qualifier does exist, please open an issue in the GitHub issue
+    /// tracker if none exists already, or react to the existing one to help
+    /// prioritize work.
+ #[error("Unknown layout qualifier: {0}")]
+ UnknownLayoutQualifier(String),
+ /// Unsupported matrix of the form matCx2
+ ///
+    /// Our IR expects matrices of the form matCx2 to have a stride of 8; however,
+    /// matrices in the std140 layout have a stride of at least 16
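+    /// Matrices of this form are those with two rows, e.g. `mat2`, `mat3x2` and `mat4x2`.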
+ #[error("unsupported matrix of the form matCx2 in std140 block layout")]
+ UnsupportedMatrixTypeInStd140,
+ /// A variable with the same name already exists in the current scope.
+ #[error("Variable already declared: {0}")]
+ VariableAlreadyDeclared(String),
+ /// A semantic error was detected in the shader.
+ #[error("{0}")]
+ SemanticError(Cow<'static, str>),
+ /// An error was returned by the preprocessor.
+ #[error("{0:?}")]
+ PreprocessorError(PreprocessorError),
+ /// The parser entered an illegal state and exited
+ ///
+    /// This is obviously a bug and, as such, should be reported in the GitHub issue tracker
+ #[error("Internal error: {0}")]
+ InternalError(&'static str),
+}
+
+impl From<ConstantSolvingError> for ErrorKind {
+ fn from(err: ConstantSolvingError) -> Self {
+ ErrorKind::SemanticError(err.to_string().into())
+ }
+}
+
+/// Error returned during shader parsing.
+#[derive(Debug, Error)]
+#[error("{kind}")]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct Error {
+ /// Holds the information about the error itself.
+ pub kind: ErrorKind,
+ /// Holds information about the range of the source code where the error happened.
+ pub meta: Span,
+}
diff --git a/third_party/rust/naga/src/front/glsl/functions.rs b/third_party/rust/naga/src/front/glsl/functions.rs
new file mode 100644
index 0000000000..10c964b5e0
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/functions.rs
@@ -0,0 +1,1680 @@
+use super::{
+ ast::*,
+ builtins::{inject_builtin, sampled_to_depth},
+ context::{Context, ExprPos, StmtContext},
+ error::{Error, ErrorKind},
+ types::scalar_components,
+ Frontend, Result,
+};
+use crate::{
+ front::glsl::types::type_power, proc::ensure_block_returns, AddressSpace, Arena, Block,
+ Constant, ConstantInner, EntryPoint, Expression, Function, FunctionArgument, FunctionResult,
+ Handle, LocalVariable, ScalarKind, ScalarValue, Span, Statement, StructMember, Type, TypeInner,
+};
+use std::iter;
+
+/// Struct detailing a store operation that must happen after a function call
+struct ProxyWrite {
+ /// The store target
+ target: Handle<Expression>,
+ /// A pointer to read the value of the store
+ value: Handle<Expression>,
+ /// An optional conversion to be applied
+ convert: Option<(ScalarKind, crate::Bytes)>,
+}
+
+impl Frontend {
+ fn add_constant_value(
+ &mut self,
+ scalar_kind: ScalarKind,
+ value: u64,
+ meta: Span,
+ ) -> Handle<Constant> {
+ let value = match scalar_kind {
+ ScalarKind::Uint => ScalarValue::Uint(value),
+ ScalarKind::Sint => ScalarValue::Sint(value as i64),
+ ScalarKind::Float => ScalarValue::Float(value as f64),
+ ScalarKind::Bool => unreachable!(),
+ };
+
+ self.module.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar { width: 4, value },
+ },
+ meta,
+ )
+ }
+
+ pub(crate) fn function_or_constructor_call(
+ &mut self,
+ ctx: &mut Context,
+ stmt: &StmtContext,
+ body: &mut Block,
+ fc: FunctionCallKind,
+ raw_args: &[Handle<HirExpr>],
+ meta: Span,
+ ) -> Result<Option<Handle<Expression>>> {
+ let args: Vec<_> = raw_args
+ .iter()
+ .map(|e| ctx.lower_expect_inner(stmt, self, *e, ExprPos::Rhs, body))
+ .collect::<Result<_>>()?;
+
+ match fc {
+ FunctionCallKind::TypeConstructor(ty) => {
+ if args.len() == 1 {
+ self.constructor_single(ctx, body, ty, args[0], meta)
+ .map(Some)
+ } else {
+ self.constructor_many(ctx, body, ty, args, meta).map(Some)
+ }
+ }
+ FunctionCallKind::Function(name) => {
+ self.function_call(ctx, stmt, body, name, args, raw_args, meta)
+ }
+ }
+ }
+
+ fn constructor_single(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ ty: Handle<Type>,
+ (mut value, expr_meta): (Handle<Expression>, Span),
+ meta: Span,
+ ) -> Result<Handle<Expression>> {
+ let expr_type = self.resolve_type(ctx, value, expr_meta)?;
+
+ let vector_size = match *expr_type {
+ TypeInner::Vector { size, .. } => Some(size),
+ _ => None,
+ };
+
+ // Special case: if casting from a bool, we need to use Select and not As.
+ match self.module.types[ty].inner.scalar_kind() {
+ Some(result_scalar_kind)
+ if expr_type.scalar_kind() == Some(ScalarKind::Bool)
+ && result_scalar_kind != ScalarKind::Bool =>
+ {
+ let c0 = self.add_constant_value(result_scalar_kind, 0u64, meta);
+ let c1 = self.add_constant_value(result_scalar_kind, 1u64, meta);
+ let mut reject = ctx.add_expression(Expression::Constant(c0), expr_meta, body);
+ let mut accept = ctx.add_expression(Expression::Constant(c1), expr_meta, body);
+
+ ctx.implicit_splat(self, &mut reject, meta, vector_size)?;
+ ctx.implicit_splat(self, &mut accept, meta, vector_size)?;
+
+ let h = ctx.add_expression(
+ Expression::Select {
+ accept,
+ reject,
+ condition: value,
+ },
+ expr_meta,
+ body,
+ );
+
+ return Ok(h);
+ }
+ _ => {}
+ }
+
+ Ok(match self.module.types[ty].inner {
+ TypeInner::Vector { size, kind, width } if vector_size.is_none() => {
+ ctx.forced_conversion(self, &mut value, expr_meta, kind, width)?;
+
+ if let TypeInner::Scalar { .. } = *self.resolve_type(ctx, value, expr_meta)? {
+ ctx.add_expression(Expression::Splat { size, value }, meta, body)
+ } else {
+ self.vector_constructor(
+ ctx,
+ body,
+ ty,
+ size,
+ kind,
+ width,
+ &[(value, expr_meta)],
+ meta,
+ )?
+ }
+ }
+ TypeInner::Scalar { kind, width } => {
+ let mut expr = value;
+ if let TypeInner::Vector { .. } | TypeInner::Matrix { .. } =
+ *self.resolve_type(ctx, value, expr_meta)?
+ {
+ expr = ctx.add_expression(
+ Expression::AccessIndex {
+ base: expr,
+ index: 0,
+ },
+ meta,
+ body,
+ );
+ }
+
+ if let TypeInner::Matrix { .. } = *self.resolve_type(ctx, value, expr_meta)? {
+ expr = ctx.add_expression(
+ Expression::AccessIndex {
+ base: expr,
+ index: 0,
+ },
+ meta,
+ body,
+ );
+ }
+
+ ctx.add_expression(
+ Expression::As {
+ kind,
+ expr,
+ convert: Some(width),
+ },
+ meta,
+ body,
+ )
+ }
+ TypeInner::Vector { size, kind, width } => {
+ if vector_size.map_or(true, |s| s != size) {
+ value = ctx.vector_resize(size, value, expr_meta, body);
+ }
+
+ ctx.add_expression(
+ Expression::As {
+ kind,
+ expr: value,
+ convert: Some(width),
+ },
+ meta,
+ body,
+ )
+ }
+ TypeInner::Matrix {
+ columns,
+ rows,
+ width,
+ } => self.matrix_one_arg(
+ ctx,
+ body,
+ ty,
+ columns,
+ rows,
+ width,
+ (value, expr_meta),
+ meta,
+ )?,
+ TypeInner::Struct { ref members, .. } => {
+ let scalar_components = members
+ .get(0)
+ .and_then(|member| scalar_components(&self.module.types[member.ty].inner));
+ if let Some((kind, width)) = scalar_components {
+ ctx.implicit_conversion(self, &mut value, expr_meta, kind, width)?;
+ }
+
+ ctx.add_expression(
+ Expression::Compose {
+ ty,
+ components: vec![value],
+ },
+ meta,
+ body,
+ )
+ }
+
+ TypeInner::Array { base, .. } => {
+ let scalar_components = scalar_components(&self.module.types[base].inner);
+ if let Some((kind, width)) = scalar_components {
+ ctx.implicit_conversion(self, &mut value, expr_meta, kind, width)?;
+ }
+
+ ctx.add_expression(
+ Expression::Compose {
+ ty,
+ components: vec![value],
+ },
+ meta,
+ body,
+ )
+ }
+ _ => {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError("Bad type constructor".into()),
+ meta,
+ });
+
+ value
+ }
+ })
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn matrix_one_arg(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ ty: Handle<Type>,
+ columns: crate::VectorSize,
+ rows: crate::VectorSize,
+ width: crate::Bytes,
+ (mut value, expr_meta): (Handle<Expression>, Span),
+ meta: Span,
+ ) -> Result<Handle<Expression>> {
+ let mut components = Vec::with_capacity(columns as usize);
+ // TODO: casts
+        // `Expression::As` doesn't support matrix width casts,
+        // so we need to do some extra work for casts
+
+ ctx.forced_conversion(self, &mut value, expr_meta, ScalarKind::Float, width)?;
+ match *self.resolve_type(ctx, value, expr_meta)? {
+ TypeInner::Scalar { .. } => {
+ // If a matrix is constructed with a single scalar value, then that
+ // value is used to initialize all the values along the diagonal of
+ // the matrix; the rest are given zeros.
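+                // For example, `mat2(2.0)` yields the columns
+                // `vec2(2.0, 0.0)` and `vec2(0.0, 2.0)`.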
+ let vector_ty = self.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector {
+ size: rows,
+ kind: ScalarKind::Float,
+ width,
+ },
+ },
+ meta,
+ );
+ let zero_constant = self.module.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width,
+ value: ScalarValue::Float(0.0),
+ },
+ },
+ meta,
+ );
+ let zero = ctx.add_expression(Expression::Constant(zero_constant), meta, body);
+
+ for i in 0..columns as u32 {
+ components.push(
+ ctx.add_expression(
+ Expression::Compose {
+ ty: vector_ty,
+ components: (0..rows as u32)
+ .map(|r| match r == i {
+ true => value,
+ false => zero,
+ })
+ .collect(),
+ },
+ meta,
+ body,
+ ),
+ )
+ }
+ }
+ TypeInner::Matrix {
+ rows: ori_rows,
+ columns: ori_cols,
+ ..
+ } => {
+ // If a matrix is constructed from a matrix, then each component
+ // (column i, row j) in the result that has a corresponding component
+ // (column i, row j) in the argument will be initialized from there. All
+ // other components will be initialized to the identity matrix.
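+                // For example, `mat4(m)` for a `mat2` argument copies `m` into the
+                // upper-left 2x2 corner, fills the remaining diagonal with ones,
+                // and everything else with zeros.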
+
+ let zero_constant = self.module.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width,
+ value: ScalarValue::Float(0.0),
+ },
+ },
+ meta,
+ );
+ let zero = ctx.add_expression(Expression::Constant(zero_constant), meta, body);
+ let one_constant = self.module.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width,
+ value: ScalarValue::Float(1.0),
+ },
+ },
+ meta,
+ );
+ let one = ctx.add_expression(Expression::Constant(one_constant), meta, body);
+ let vector_ty = self.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector {
+ size: rows,
+ kind: ScalarKind::Float,
+ width,
+ },
+ },
+ meta,
+ );
+
+ for i in 0..columns as u32 {
+ if i < ori_cols as u32 {
+ use std::cmp::Ordering;
+
+ let vector = ctx.add_expression(
+ Expression::AccessIndex {
+ base: value,
+ index: i,
+ },
+ meta,
+ body,
+ );
+
+ components.push(match ori_rows.cmp(&rows) {
+ Ordering::Less => {
+ let components = (0..rows as u32)
+ .map(|r| {
+ if r < ori_rows as u32 {
+ ctx.add_expression(
+ Expression::AccessIndex {
+ base: vector,
+ index: r,
+ },
+ meta,
+ body,
+ )
+ } else if r == i {
+ one
+ } else {
+ zero
+ }
+ })
+ .collect();
+
+ ctx.add_expression(
+ Expression::Compose {
+ ty: vector_ty,
+ components,
+ },
+ meta,
+ body,
+ )
+ }
+ Ordering::Equal => vector,
+ Ordering::Greater => ctx.vector_resize(rows, vector, meta, body),
+ })
+ } else {
+ let vec_constant = self.module.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Composite {
+ ty: vector_ty,
+ components: (0..rows as u32)
+ .map(|r| match r == i {
+ true => one_constant,
+ false => zero_constant,
+ })
+ .collect(),
+ },
+ },
+ meta,
+ );
+ let vec =
+ ctx.add_expression(Expression::Constant(vec_constant), meta, body);
+
+ components.push(vec)
+ }
+ }
+ }
+ _ => {
+ components = iter::repeat(value).take(columns as usize).collect();
+ }
+ }
+
+ Ok(ctx.add_expression(Expression::Compose { ty, components }, meta, body))
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn vector_constructor(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ ty: Handle<Type>,
+ size: crate::VectorSize,
+ kind: ScalarKind,
+ width: crate::Bytes,
+ args: &[(Handle<Expression>, Span)],
+ meta: Span,
+ ) -> Result<Handle<Expression>> {
+ let mut components = Vec::with_capacity(size as usize);
+
+ for (mut arg, expr_meta) in args.iter().copied() {
+ ctx.forced_conversion(self, &mut arg, expr_meta, kind, width)?;
+
+ if components.len() >= size as usize {
+ break;
+ }
+
+ match *self.resolve_type(ctx, arg, expr_meta)? {
+ TypeInner::Scalar { .. } => components.push(arg),
+ TypeInner::Matrix { rows, columns, .. } => {
+ components.reserve(rows as usize * columns as usize);
+ for c in 0..(columns as u32) {
+ let base = ctx.add_expression(
+ Expression::AccessIndex {
+ base: arg,
+ index: c,
+ },
+ expr_meta,
+ body,
+ );
+ for r in 0..(rows as u32) {
+ components.push(ctx.add_expression(
+ Expression::AccessIndex { base, index: r },
+ expr_meta,
+ body,
+ ))
+ }
+ }
+ }
+ TypeInner::Vector { size: ori_size, .. } => {
+ components.reserve(ori_size as usize);
+ for index in 0..(ori_size as u32) {
+ components.push(ctx.add_expression(
+ Expression::AccessIndex { base: arg, index },
+ expr_meta,
+ body,
+ ))
+ }
+ }
+ _ => components.push(arg),
+ }
+ }
+
+ components.truncate(size as usize);
+
+ Ok(ctx.add_expression(Expression::Compose { ty, components }, meta, body))
+ }
+
+ fn constructor_many(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ ty: Handle<Type>,
+ args: Vec<(Handle<Expression>, Span)>,
+ meta: Span,
+ ) -> Result<Handle<Expression>> {
+ let mut components = Vec::with_capacity(args.len());
+
+ match self.module.types[ty].inner {
+ TypeInner::Matrix {
+ columns,
+ rows,
+ width,
+ } => {
+ let mut flattened = Vec::with_capacity(columns as usize * rows as usize);
+
+ for (mut arg, meta) in args.iter().copied() {
+ ctx.forced_conversion(self, &mut arg, meta, ScalarKind::Float, width)?;
+
+ match *self.resolve_type(ctx, arg, meta)? {
+ TypeInner::Vector { size, .. } => {
+ for i in 0..(size as u32) {
+ flattened.push(ctx.add_expression(
+ Expression::AccessIndex {
+ base: arg,
+ index: i,
+ },
+ meta,
+ body,
+ ))
+ }
+ }
+ _ => flattened.push(arg),
+ }
+ }
+
+ let ty = self.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector {
+ size: rows,
+ kind: ScalarKind::Float,
+ width,
+ },
+ },
+ meta,
+ );
+
+ for chunk in flattened.chunks(rows as usize) {
+ components.push(ctx.add_expression(
+ Expression::Compose {
+ ty,
+ components: Vec::from(chunk),
+ },
+ meta,
+ body,
+ ))
+ }
+ }
+ TypeInner::Vector { size, kind, width } => {
+ return self.vector_constructor(ctx, body, ty, size, kind, width, &args, meta)
+ }
+ TypeInner::Array { base, .. } => {
+ for (mut arg, meta) in args.iter().copied() {
+ let scalar_components = scalar_components(&self.module.types[base].inner);
+ if let Some((kind, width)) = scalar_components {
+ ctx.implicit_conversion(self, &mut arg, meta, kind, width)?;
+ }
+
+ components.push(arg)
+ }
+ }
+ TypeInner::Struct { ref members, .. } => {
+ for ((mut arg, meta), member) in args.iter().copied().zip(members.iter()) {
+ let scalar_components = scalar_components(&self.module.types[member.ty].inner);
+ if let Some((kind, width)) = scalar_components {
+ ctx.implicit_conversion(self, &mut arg, meta, kind, width)?;
+ }
+
+ components.push(arg)
+ }
+ }
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::SemanticError("Constructor: Too many arguments".into()),
+ meta,
+ })
+ }
+ }
+
+ Ok(ctx.add_expression(Expression::Compose { ty, components }, meta, body))
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn function_call(
+ &mut self,
+ ctx: &mut Context,
+ stmt: &StmtContext,
+ body: &mut Block,
+ name: String,
+ args: Vec<(Handle<Expression>, Span)>,
+ raw_args: &[Handle<HirExpr>],
+ meta: Span,
+ ) -> Result<Option<Handle<Expression>>> {
+ // Grow the typifier to be able to index it later without needing
+ // to hold the context mutably
+ for &(expr, span) in args.iter() {
+ self.typifier_grow(ctx, expr, span)?;
+ }
+
+ // Check if the passed arguments require any special variations
+ let mut variations = builtin_required_variations(
+ args.iter()
+ .map(|&(expr, _)| ctx.typifier.get(expr, &self.module.types)),
+ );
+
+ // Initiate the declaration if it wasn't previously initialized and inject builtins
+ let declaration = self.lookup_function.entry(name.clone()).or_insert_with(|| {
+ variations |= BuiltinVariations::STANDARD;
+ Default::default()
+ });
+ inject_builtin(declaration, &mut self.module, &name, variations);
+
+        // Borrow again but without mutability; at this point a declaration is guaranteed
+ let declaration = self.lookup_function.get(&name).unwrap();
+
+ // Possibly contains the overload to be used in the call
+ let mut maybe_overload = None;
+        // The conversions needed for the best analyzed overload; this is initialized to
+        // all `Conversion::None` so that the first overload always passes without ambiguity
+ let mut old_conversions = vec![Conversion::None; args.len()];
+        // Tracks whether the comparison between overloads led to an ambiguity
+ let mut ambiguous = false;
+
+        // Iterate over all the available overloads to select either an exact match or an
+        // overload which has suitable implicit conversions
+ 'outer: for (overload_idx, overload) in declaration.overloads.iter().enumerate() {
+ // If the overload and the function call don't have the same number of arguments
+ // continue to the next overload
+ if args.len() != overload.parameters.len() {
+ continue;
+ }
+
+ log::trace!("Testing overload {}", overload_idx);
+
+ // Stores whether the current overload matches exactly the function call
+ let mut exact = true;
+ // State of the selection
+            // If None we still don't know which overload is best
+ // If Some(true) the new overload is better
+ // If Some(false) the old overload is better
+ let mut superior = None;
+ // Store the conversions for the current overload so that later they can replace the
+ // conversions used for querying the best overload
+ let mut new_conversions = vec![Conversion::None; args.len()];
+
+ // Loop through the overload parameters and check if the current overload is better
+ // compared to the previous best overload.
+ for (i, overload_parameter) in overload.parameters.iter().enumerate() {
+ let call_argument = &args[i];
+ let parameter_info = &overload.parameters_info[i];
+
+                // If the image is used in the overload as a depth texture, convert it
+                // before comparing, otherwise exact matches wouldn't be reported
+ if parameter_info.depth {
+ sampled_to_depth(
+ &mut self.module,
+ ctx,
+ call_argument.0,
+ call_argument.1,
+ &mut self.errors,
+ );
+ self.invalidate_expression(ctx, call_argument.0, call_argument.1)?
+ }
+
+ let overload_param_ty = &self.module.types[*overload_parameter].inner;
+ let call_arg_ty = self.resolve_type(ctx, call_argument.0, call_argument.1)?;
+
+ log::trace!(
+ "Testing parameter {}\n\tOverload = {:?}\n\tCall = {:?}",
+ i,
+ overload_param_ty,
+ call_arg_ty
+ );
+
+                // Storage images cannot be directly compared since, while the access is part
+                // of the type in naga's IR, in glsl it is a qualifier and doesn't take part in
+                // the match as long as the needed access is satisfied.
+ if let (
+ &TypeInner::Image {
+ class:
+ crate::ImageClass::Storage {
+ format: overload_format,
+ access: overload_access,
+ },
+ dim: overload_dim,
+ arrayed: overload_arrayed,
+ },
+ &TypeInner::Image {
+ class:
+ crate::ImageClass::Storage {
+ format: call_format,
+ access: call_access,
+ },
+ dim: call_dim,
+ arrayed: call_arrayed,
+ },
+ ) = (overload_param_ty, call_arg_ty)
+ {
+                    // Image sizes must match, otherwise the overload isn't what we want
+ let good_size = call_dim == overload_dim && call_arrayed == overload_arrayed;
+                    // Glsl requires the formats to match strictly, unless this is a builtin
+                    // function overload that has not been replaced, in which case we only
+                    // check that the format scalar kind matches
+ let good_format = overload_format == call_format
+ || (overload.internal
+ && ScalarKind::from(overload_format) == ScalarKind::from(call_format));
+ if !(good_size && good_format) {
+ continue 'outer;
+ }
+
+                    // While a storage access mismatch is an error, it isn't one that causes
+                    // the overload matching to fail, so we defer the error and consider
+                    // that the images match exactly
+ if !call_access.contains(overload_access) {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ format!(
+ "'{name}': image needs {overload_access:?} access but only {call_access:?} was provided"
+ )
+ .into(),
+ ),
+ meta,
+ });
+ }
+
+ // The images satisfy the conditions to be considered as an exact match
+ new_conversions[i] = Conversion::Exact;
+ continue;
+ } else if overload_param_ty == call_arg_ty {
+ // If the types match there's no need to check for conversions so continue
+ new_conversions[i] = Conversion::Exact;
+ continue;
+ }
+
+                // Glsl defines that inout follows both the conversions for input parameters
+                // and output parameters. This means that the type must have a conversion
+                // both from the call argument to the function parameter and from the
+                // function parameter to the call argument; the only way this is possible
+                // is for the conversion to be an identity (i.e. call argument = function
+                // parameter)
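+                // For example, an `int` argument can match a hypothetical `inout int`
+                // parameter but not an `inout float` one, even though `int` implicitly
+                // converts to `float` in the input direction.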
+ if let ParameterQualifier::InOut = parameter_info.qualifier {
+ continue 'outer;
+ }
+
+ // The function call argument and the function definition
+ // parameter are not equal at this point, so we need to try
+ // implicit conversions.
+ //
+ // Now there are two cases, the argument is defined as a normal
+ // parameter (`in` or `const`), in this case an implicit
+ // conversion is made from the calling argument to the
+ // definition argument. If the parameter is `out` the
+ // opposite needs to be done, so the implicit conversion is made
+ // from the definition argument to the calling argument.
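+                // For example, a `float` argument may match an `in double` parameter by
+                // converting the argument, while a `double` argument may match an
+                // `out float` parameter by converting the written-back result instead.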
+ let maybe_conversion = if parameter_info.qualifier.is_lhs() {
+ conversion(call_arg_ty, overload_param_ty)
+ } else {
+ conversion(overload_param_ty, call_arg_ty)
+ };
+
+ let conversion = match maybe_conversion {
+ Some(info) => info,
+ None => continue 'outer,
+ };
+
+ // At this point a conversion will be needed so the overload no longer
+ // exactly matches the call arguments
+ exact = false;
+
+                // Compare the conversion needed for this overload parameter to that of the
+                // corresponding parameter of the last analyzed overload; the value is:
+ // - `true` when the new overload argument has a better conversion
+ // - `false` when the old overload argument has a better conversion
+ let best_arg = match (conversion, old_conversions[i]) {
+ // An exact match is always better, we don't need to check this for the
+ // current overload since it was checked earlier
+ (_, Conversion::Exact) => false,
+ // No overload was yet analyzed so this one is the best yet
+ (_, Conversion::None) => true,
+ // A conversion from a float to a double is the best possible conversion
+ (Conversion::FloatToDouble, _) => true,
+ (_, Conversion::FloatToDouble) => false,
+                    // A conversion from an integer to a float is preferred over one
+                    // from an integer to a double
+ (Conversion::IntToFloat, Conversion::IntToDouble) => true,
+ (Conversion::IntToDouble, Conversion::IntToFloat) => false,
+                    // This case handles things like no conversion and exact, which were
+                    // already treated, and other cases in which neither conversion is
+                    // better than the other
+ _ => continue,
+ };
+
+                // Check if the best parameter corresponds to the currently selected overload
+                // to pass to the next comparison; if this isn't true, mark it as ambiguous
+ match best_arg {
+ true => match superior {
+ Some(false) => ambiguous = true,
+ _ => {
+ superior = Some(true);
+ new_conversions[i] = conversion
+ }
+ },
+ false => match superior {
+ Some(true) => ambiguous = true,
+ _ => superior = Some(false),
+ },
+ }
+ }
+
+            // The overload matches the function call exactly, so there's no ambiguity (since
+            // repeated overloads aren't allowed) and the current overload is selected; no
+            // further querying is needed.
+ if exact {
+ maybe_overload = Some(overload);
+ ambiguous = false;
+ break;
+ }
+
+ match superior {
+                // New overload is better, keep it
+ Some(true) => {
+ maybe_overload = Some(overload);
+ // Replace the conversions
+ old_conversions = new_conversions;
+ }
+                // Old overload is better, do nothing
+ Some(false) => {}
+                // Neither overload was better than the other; this can happen when all
+                // conversions are ambiguous, in which case the overloads themselves are
+                // ambiguous.
+ None => {
+ ambiguous = true;
+                    // Assign the new overload; this helps ensure that in this case of
+                    // ambiguity the parsing won't end immediately, allowing further
+                    // errors to be collected.
+ maybe_overload = Some(overload);
+ }
+ }
+ }
+
+ if ambiguous {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ format!("Ambiguous best function for '{name}'").into(),
+ ),
+ meta,
+ })
+ }
+
+ let overload = maybe_overload.ok_or_else(|| Error {
+ kind: ErrorKind::SemanticError(format!("Unknown function '{name}'").into()),
+ meta,
+ })?;
+
+ let parameters_info = overload.parameters_info.clone();
+ let parameters = overload.parameters.clone();
+ let is_void = overload.void;
+ let kind = overload.kind;
+
+ let mut arguments = Vec::with_capacity(args.len());
+ let mut proxy_writes = Vec::new();
+
+ // Iterate through the function call arguments applying transformations as needed
+ for (((parameter_info, call_argument), expr), parameter) in parameters_info
+ .iter()
+ .zip(&args)
+ .zip(raw_args)
+ .zip(&parameters)
+ {
+ let (mut handle, meta) =
+ ctx.lower_expect_inner(stmt, self, *expr, parameter_info.qualifier.as_pos(), body)?;
+
+ if parameter_info.qualifier.is_lhs() {
+ self.process_lhs_argument(
+ ctx,
+ body,
+ meta,
+ *parameter,
+ parameter_info,
+ handle,
+ call_argument,
+ &mut proxy_writes,
+ &mut arguments,
+ )?;
+
+ continue;
+ }
+
+ let scalar_comps = scalar_components(&self.module.types[*parameter].inner);
+
+ // Apply implicit conversions as needed
+ if let Some((kind, width)) = scalar_comps {
+ ctx.implicit_conversion(self, &mut handle, meta, kind, width)?;
+ }
+
+ arguments.push(handle)
+ }
+
+ match kind {
+ FunctionKind::Call(function) => {
+ ctx.emit_end(body);
+
+ let result = if !is_void {
+ Some(ctx.add_expression(Expression::CallResult(function), meta, body))
+ } else {
+ None
+ };
+
+ body.push(
+ crate::Statement::Call {
+ function,
+ arguments,
+ result,
+ },
+ meta,
+ );
+
+ ctx.emit_start();
+
+ // Write back all the variables that were scheduled to their original place
+ for proxy_write in proxy_writes {
+ let mut value = ctx.add_expression(
+ Expression::Load {
+ pointer: proxy_write.value,
+ },
+ meta,
+ body,
+ );
+
+ if let Some((kind, width)) = proxy_write.convert {
+ ctx.conversion(&mut value, meta, kind, width)?;
+ }
+
+ ctx.emit_restart(body);
+
+ body.push(
+ Statement::Store {
+ pointer: proxy_write.target,
+ value,
+ },
+ meta,
+ );
+ }
+
+ Ok(result)
+ }
+ FunctionKind::Macro(builtin) => {
+ builtin.call(self, ctx, body, arguments.as_mut_slice(), meta)
+ }
+ }
+ }
+
+ /// Processes a function call argument that appears in place of an output
+ /// parameter.
+ #[allow(clippy::too_many_arguments)]
+ fn process_lhs_argument(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ meta: Span,
+ parameter_ty: Handle<Type>,
+ parameter_info: &ParameterInfo,
+ original: Handle<Expression>,
+ call_argument: &(Handle<Expression>, Span),
+ proxy_writes: &mut Vec<ProxyWrite>,
+ arguments: &mut Vec<Handle<Expression>>,
+ ) -> Result<()> {
+ let original_ty = self.resolve_type(ctx, original, meta)?;
+ let original_pointer_space = original_ty.pointer_space();
+
+ // The type of a possible spill variable needed for a proxy write
+ let mut maybe_ty = match *original_ty {
+            // If the argument is to be passed as a pointer but the type of the
+            // expression is a vector, it must mean that it was, for example,
+            // swizzled, and it must be spilled into a local before calling
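+            // e.g. passing `v.xz` to an `out` parameter spills into a local
+            // vector; after the call the result is written back to the original
+            // components individually.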
+ TypeInner::Vector { size, kind, width } => Some(self.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector { size, kind, width },
+ },
+ Span::default(),
+ )),
+ // If the argument is a pointer whose address space isn't `Function`, an
+ // indirection through a local variable is needed to align the address
+ // spaces of the call argument and the overload parameter.
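+            // e.g. a global variable passed to an `inout` parameter is first
+            // copied into a function-local spill variable, and that local is
+            // what actually gets passed to the call.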
+ TypeInner::Pointer { base, space } if space != AddressSpace::Function => Some(base),
+ TypeInner::ValuePointer {
+ size,
+ kind,
+ width,
+ space,
+ } if space != AddressSpace::Function => {
+ let inner = match size {
+ Some(size) => TypeInner::Vector { size, kind, width },
+ None => TypeInner::Scalar { kind, width },
+ };
+
+ Some(
+ self.module
+ .types
+ .insert(Type { name: None, inner }, Span::default()),
+ )
+ }
+ _ => None,
+ };
+
+ // Since the original expression might be a pointer and we want a value
+ // for the proxy writes, we might need to load the pointer.
+ let value = if original_pointer_space.is_some() {
+ ctx.add_expression(
+ Expression::Load { pointer: original },
+ Span::default(),
+ body,
+ )
+ } else {
+ original
+ };
+
+ let call_arg_ty = self.resolve_type(ctx, call_argument.0, call_argument.1)?;
+ let overload_param_ty = &self.module.types[parameter_ty].inner;
+ let needs_conversion = call_arg_ty != overload_param_ty;
+
+ let arg_scalar_comps = scalar_components(call_arg_ty);
+
+ // Since output parameters also allow implicit conversions from the
+ // parameter to the argument, we need to spill the conversion to a
+ // variable and create a proxy write for the original variable.
+ if needs_conversion {
+ maybe_ty = Some(parameter_ty);
+ }
+
+ if let Some(ty) = maybe_ty {
+ // Create the spill variable
+ let spill_var = ctx.locals.append(
+ LocalVariable {
+ name: None,
+ ty,
+ init: None,
+ },
+ Span::default(),
+ );
+ let spill_expr =
+ ctx.add_expression(Expression::LocalVariable(spill_var), Span::default(), body);
+
+            // If the argument is also copied in, we must store the value of the
+ // original variable to the spill variable.
+ if let ParameterQualifier::InOut = parameter_info.qualifier {
+ body.push(
+ Statement::Store {
+ pointer: spill_expr,
+ value,
+ },
+ Span::default(),
+ );
+ }
+
+ // Add the spill variable as an argument to the function call
+ arguments.push(spill_expr);
+
+ let convert = if needs_conversion {
+ arg_scalar_comps
+ } else {
+ None
+ };
+
+        // Register the temporary local to be written back to its original
+ // place after the function call
+ if let Expression::Swizzle {
+ size,
+ mut vector,
+ pattern,
+ } = ctx.expressions[original]
+ {
+ if let Expression::Load { pointer } = ctx.expressions[vector] {
+ vector = pointer;
+ }
+
+ for (i, component) in pattern.iter().take(size as usize).enumerate() {
+ let original = ctx.add_expression(
+ Expression::AccessIndex {
+ base: vector,
+ index: *component as u32,
+ },
+ Span::default(),
+ body,
+ );
+
+ let spill_component = ctx.add_expression(
+ Expression::AccessIndex {
+ base: spill_expr,
+ index: i as u32,
+ },
+ Span::default(),
+ body,
+ );
+
+ proxy_writes.push(ProxyWrite {
+ target: original,
+ value: spill_component,
+ convert,
+ });
+ }
+ } else {
+ proxy_writes.push(ProxyWrite {
+ target: original,
+ value: spill_expr,
+ convert,
+ });
+ }
+ } else {
+ arguments.push(original);
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn add_function(
+ &mut self,
+ ctx: Context,
+ name: String,
+ result: Option<FunctionResult>,
+ mut body: Block,
+ meta: Span,
+ ) {
+ ensure_block_returns(&mut body);
+
+ let void = result.is_none();
+
+ let &mut Frontend {
+ ref mut lookup_function,
+ ref mut module,
+ ..
+ } = self;
+
+ // Check if the passed arguments require any special variations
+ let mut variations =
+ builtin_required_variations(ctx.parameters.iter().map(|&arg| &module.types[arg].inner));
+
+ // Initialize the declaration if it wasn't previously initialized and inject builtins
+ let declaration = lookup_function.entry(name.clone()).or_insert_with(|| {
+ variations |= BuiltinVariations::STANDARD;
+ Default::default()
+ });
+ inject_builtin(declaration, module, &name, variations);
+
+ let Context {
+ expressions,
+ locals,
+ arguments,
+ parameters,
+ parameters_info,
+ ..
+ } = ctx;
+
+ let function = Function {
+ name: Some(name),
+ arguments,
+ result,
+ local_variables: locals,
+ expressions,
+ named_expressions: crate::NamedExpressions::default(),
+ body,
+ };
+
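+ // Search for an overload with the exact same parameter types; if found,
+ // this definition either completes its prototype or errors if it was
+ // already defined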
+ 'outer: for decl in declaration.overloads.iter_mut() {
+ if parameters.len() != decl.parameters.len() {
+ continue;
+ }
+
+ for (new_parameter, old_parameter) in parameters.iter().zip(decl.parameters.iter()) {
+ let new_inner = &module.types[*new_parameter].inner;
+ let old_inner = &module.types[*old_parameter].inner;
+
+ if new_inner != old_inner {
+ continue 'outer;
+ }
+ }
+
+ if decl.defined {
+ return self.errors.push(Error {
+ kind: ErrorKind::SemanticError("Function already defined".into()),
+ meta,
+ });
+ }
+
+ decl.defined = true;
+ decl.parameters_info = parameters_info;
+ match decl.kind {
+ FunctionKind::Call(handle) => *self.module.functions.get_mut(handle) = function,
+ FunctionKind::Macro(_) => {
+ let handle = module.functions.append(function, meta);
+ decl.kind = FunctionKind::Call(handle)
+ }
+ }
+ return;
+ }
+
+ let handle = module.functions.append(function, meta);
+ declaration.overloads.push(Overload {
+ parameters,
+ parameters_info,
+ kind: FunctionKind::Call(handle),
+ defined: true,
+ internal: false,
+ void,
+ });
+ }
+
+ pub(crate) fn add_prototype(
+ &mut self,
+ ctx: Context,
+ name: String,
+ result: Option<FunctionResult>,
+ meta: Span,
+ ) {
+ let void = result.is_none();
+
+ let &mut Frontend {
+ ref mut lookup_function,
+ ref mut module,
+ ..
+ } = self;
+
+ // Check if the passed arguments require any special variations
+ let mut variations =
+ builtin_required_variations(ctx.parameters.iter().map(|&arg| &module.types[arg].inner));
+
+ // Initialize the declaration if it wasn't previously initialized and inject builtins
+ let declaration = lookup_function.entry(name.clone()).or_insert_with(|| {
+ variations |= BuiltinVariations::STANDARD;
+ Default::default()
+ });
+ inject_builtin(declaration, module, &name, variations);
+
+ let Context {
+ arguments,
+ parameters,
+ parameters_info,
+ ..
+ } = ctx;
+
+ let function = Function {
+ name: Some(name),
+ arguments,
+ result,
+ ..Default::default()
+ };
+
+ 'outer: for decl in declaration.overloads.iter() {
+ if parameters.len() != decl.parameters.len() {
+ continue;
+ }
+
+ for (new_parameter, old_parameter) in parameters.iter().zip(decl.parameters.iter()) {
+ let new_inner = &module.types[*new_parameter].inner;
+ let old_inner = &module.types[*old_parameter].inner;
+
+ if new_inner != old_inner {
+ continue 'outer;
+ }
+ }
+
+ return self.errors.push(Error {
+ kind: ErrorKind::SemanticError("Prototype already defined".into()),
+ meta,
+ });
+ }
+
+ let handle = module.functions.append(function, meta);
+ declaration.overloads.push(Overload {
+ parameters,
+ parameters_info,
+ kind: FunctionKind::Call(handle),
+ defined: false,
+ internal: false,
+ void,
+ });
+ }
+
+ /// Helper function for building the input/output interface of the entry point
+ ///
+ /// Calls `f` with the data of the entry point argument, flattening composite types
+ /// recursively
+ ///
+ /// The passed arguments to the callback are:
+ /// - The name
+ /// - The pointer expression to the global storage
+ /// - The handle to the type of the entry point argument
+ /// - The binding of the entry point argument
+ /// - The expression arena
+ fn arg_type_walker(
+ &self,
+ name: Option<String>,
+ binding: crate::Binding,
+ pointer: Handle<Expression>,
+ ty: Handle<Type>,
+ expressions: &mut Arena<Expression>,
+ f: &mut impl FnMut(
+ Option<String>,
+ Handle<Expression>,
+ Handle<Type>,
+ crate::Binding,
+ &mut Arena<Expression>,
+ ),
+ ) {
+ match self.module.types[ty].inner {
+ TypeInner::Array {
+ base,
+ size: crate::ArraySize::Constant(constant),
+ ..
+ } => {
+ let mut location = match binding {
+ crate::Binding::Location { location, .. } => location,
+ crate::Binding::BuiltIn(_) => return,
+ };
+
+ // TODO: Better error reporting
+ // right now we just don't walk the array if the size isn't known at
+ // compile time and let validation catch it
+ let size = match self.module.constants[constant].to_array_length() {
+ Some(val) => val,
+ None => return f(name, pointer, ty, binding, expressions),
+ };
+
+ let interpolation =
+ self.module.types[base]
+ .inner
+ .scalar_kind()
+ .map(|kind| match kind {
+ ScalarKind::Float => crate::Interpolation::Perspective,
+ _ => crate::Interpolation::Flat,
+ });
+
+ for index in 0..size {
+ let member_pointer = expressions.append(
+ Expression::AccessIndex {
+ base: pointer,
+ index,
+ },
+ crate::Span::default(),
+ );
+
+ let binding = crate::Binding::Location {
+ location,
+ interpolation,
+ sampling: None,
+ };
+ location += 1;
+
+ self.arg_type_walker(
+ name.clone(),
+ binding,
+ member_pointer,
+ base,
+ expressions,
+ f,
+ )
+ }
+ }
+ TypeInner::Struct { ref members, .. } => {
+ let mut location = match binding {
+ crate::Binding::Location { location, .. } => location,
+ crate::Binding::BuiltIn(_) => return,
+ };
+
+ for (i, member) in members.iter().enumerate() {
+ let member_pointer = expressions.append(
+ Expression::AccessIndex {
+ base: pointer,
+ index: i as u32,
+ },
+ crate::Span::default(),
+ );
+
+ let binding = match member.binding.clone() {
+ Some(binding) => binding,
+ None => {
+ let interpolation = self.module.types[member.ty]
+ .inner
+ .scalar_kind()
+ .map(|kind| match kind {
+ ScalarKind::Float => crate::Interpolation::Perspective,
+ _ => crate::Interpolation::Flat,
+ });
+ let binding = crate::Binding::Location {
+ location,
+ interpolation,
+ sampling: None,
+ };
+ location += 1;
+ binding
+ }
+ };
+
+ self.arg_type_walker(
+ member.name.clone(),
+ binding,
+ member_pointer,
+ member.ty,
+ expressions,
+ f,
+ )
+ }
+ }
+ _ => f(name, pointer, ty, binding, expressions),
+ }
+ }
+
+ pub(crate) fn add_entry_point(
+ &mut self,
+ function: Handle<Function>,
+ global_init_body: Block,
+ mut expressions: Arena<Expression>,
+ ) {
+ let mut arguments = Vec::new();
+ let mut body = Block::with_capacity(
+ // global init body
+ global_init_body.len() +
+ // prologue and epilogue
+ self.entry_args.len() * 2
+ // Call, Emit for composing struct and return
+ + 3,
+ );
+
+ for arg in self.entry_args.iter() {
+ if arg.storage != StorageQualifier::Input {
+ continue;
+ }
+
+ let pointer =
+ expressions.append(Expression::GlobalVariable(arg.handle), Default::default());
+
+ self.arg_type_walker(
+ arg.name.clone(),
+ arg.binding.clone(),
+ pointer,
+ self.module.global_variables[arg.handle].ty,
+ &mut expressions,
+ &mut |name, pointer, ty, binding, expressions| {
+ let idx = arguments.len() as u32;
+
+ arguments.push(FunctionArgument {
+ name,
+ ty,
+ binding: Some(binding),
+ });
+
+ let value =
+ expressions.append(Expression::FunctionArgument(idx), Default::default());
+ body.push(Statement::Store { pointer, value }, Default::default());
+ },
+ )
+ }
+
+ body.extend_block(global_init_body);
+
+ body.push(
+ Statement::Call {
+ function,
+ arguments: Vec::new(),
+ result: None,
+ },
+ Default::default(),
+ );
+
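+ // Collect all the output globals into members of a struct that the
+ // entry point will return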
+ let mut span = 0;
+ let mut members = Vec::new();
+ let mut components = Vec::new();
+
+ for arg in self.entry_args.iter() {
+ if arg.storage != StorageQualifier::Output {
+ continue;
+ }
+
+ let pointer =
+ expressions.append(Expression::GlobalVariable(arg.handle), Default::default());
+
+ self.arg_type_walker(
+ arg.name.clone(),
+ arg.binding.clone(),
+ pointer,
+ self.module.global_variables[arg.handle].ty,
+ &mut expressions,
+ &mut |name, pointer, ty, binding, expressions| {
+ members.push(StructMember {
+ name,
+ ty,
+ binding: Some(binding),
+ offset: span,
+ });
+
+ span += self.module.types[ty].inner.size(&self.module.constants);
+
+ let len = expressions.len();
+ let load = expressions.append(Expression::Load { pointer }, Default::default());
+ body.push(
+ Statement::Emit(expressions.range_from(len)),
+ Default::default(),
+ );
+ components.push(load)
+ },
+ )
+ }
+
+ let (ty, value) = if !components.is_empty() {
+ let ty = self.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Struct { members, span },
+ },
+ Default::default(),
+ );
+
+ let len = expressions.len();
+ let res =
+ expressions.append(Expression::Compose { ty, components }, Default::default());
+ body.push(
+ Statement::Emit(expressions.range_from(len)),
+ Default::default(),
+ );
+
+ (Some(ty), Some(res))
+ } else {
+ (None, None)
+ };
+
+ body.push(Statement::Return { value }, Default::default());
+
+ self.module.entry_points.push(EntryPoint {
+ name: "main".to_string(),
+ stage: self.meta.stage,
+ early_depth_test: Some(crate::EarlyDepthTest { conservative: None })
+ .filter(|_| self.meta.early_fragment_tests),
+ workgroup_size: self.meta.workgroup_size,
+ function: Function {
+ arguments,
+ expressions,
+ body,
+ result: ty.map(|ty| FunctionResult { ty, binding: None }),
+ ..Default::default()
+ },
+ });
+ }
+}
+
+/// Helper enum containing the type of conversion needed for a call
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+enum Conversion {
+ /// No conversion needed
+ Exact,
+ /// Float to double conversion needed
+ FloatToDouble,
+ /// Int or uint to float conversion needed
+ IntToFloat,
+ /// Int or uint to double conversion needed
+ IntToDouble,
+ /// Other type of conversion needed
+ Other,
+ /// No conversion was yet registered
+ None,
+}
+
+/// Helper function that returns the type of conversion from `source` to
+/// `target`; returns `None` if no conversion is possible.
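+///
+/// For example, `int` to `float` yields [`Conversion::IntToFloat`], while a
+/// narrowing conversion such as `float` to `int` yields `None`.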
+fn conversion(target: &TypeInner, source: &TypeInner) -> Option<Conversion> {
+ use ScalarKind::*;
+
+ // Gather the `ScalarKind` and scalar width from both the target and the source
+ let (target_kind, target_width, source_kind, source_width) = match (target, source) {
+ // Conversions between scalars are allowed
+ (
+ &TypeInner::Scalar {
+ kind: tgt_kind,
+ width: tgt_width,
+ },
+ &TypeInner::Scalar {
+ kind: src_kind,
+ width: src_width,
+ },
+ ) => (tgt_kind, tgt_width, src_kind, src_width),
+ // Conversions between vectors of the same size are allowed
+ (
+ &TypeInner::Vector {
+ kind: tgt_kind,
+ size: tgt_size,
+ width: tgt_width,
+ },
+ &TypeInner::Vector {
+ kind: src_kind,
+ size: src_size,
+ width: src_width,
+ },
+ ) if tgt_size == src_size => (tgt_kind, tgt_width, src_kind, src_width),
+ // Conversions between matrices of the same size are allowed
+ (
+ &TypeInner::Matrix {
+ rows: tgt_rows,
+ columns: tgt_cols,
+ width: tgt_width,
+ },
+ &TypeInner::Matrix {
+ rows: src_rows,
+ columns: src_cols,
+ width: src_width,
+ },
+ ) if tgt_cols == src_cols && tgt_rows == src_rows => (Float, tgt_width, Float, src_width),
+ _ => return None,
+ };
+
+ // Check if `source` can be converted into `target`; for this to be the
+ // case the type power of `target` must not be lower than that of `source`
+ let target_power = type_power(target_kind, target_width);
+ let source_power = type_power(source_kind, source_width);
+ if target_power < source_power {
+ return None;
+ }
+
+ Some(
+ match ((target_kind, target_width), (source_kind, source_width)) {
+ // A conversion from a float to a double is special
+ ((Float, 8), (Float, 4)) => Conversion::FloatToDouble,
+ // A conversion from an integer to a float is special
+ ((Float, 4), (Sint | Uint, _)) => Conversion::IntToFloat,
+ // A conversion from an integer to a double is special
+ ((Float, 8), (Sint | Uint, _)) => Conversion::IntToDouble,
+ _ => Conversion::Other,
+ },
+ )
+}
+
+/// Helper function returning all the non-standard builtin variations needed
+/// to process the function call with the passed arguments
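+///
+/// For example, a `dvec3` argument (a vector of 8-byte floats) requires the
+/// [`BuiltinVariations::DOUBLE`] variation.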
+fn builtin_required_variations<'a>(args: impl Iterator<Item = &'a TypeInner>) -> BuiltinVariations {
+ let mut variations = BuiltinVariations::empty();
+
+ for ty in args {
+ match *ty {
+ TypeInner::ValuePointer { kind, width, .. }
+ | TypeInner::Scalar { kind, width }
+ | TypeInner::Vector { kind, width, .. } => {
+ if kind == ScalarKind::Float && width == 8 {
+ variations |= BuiltinVariations::DOUBLE
+ }
+ }
+ TypeInner::Matrix { width, .. } => {
+ if width == 8 {
+ variations |= BuiltinVariations::DOUBLE
+ }
+ }
+ TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ } => {
+ if dim == crate::ImageDimension::Cube && arrayed {
+ variations |= BuiltinVariations::CUBE_TEXTURES_ARRAY
+ }
+
+ if dim == crate::ImageDimension::D2 && arrayed && class.is_multisampled() {
+ variations |= BuiltinVariations::D2_MULTI_TEXTURES_ARRAY
+ }
+ }
+ _ => {}
+ }
+ }
+
+ variations
+}
diff --git a/third_party/rust/naga/src/front/glsl/lex.rs b/third_party/rust/naga/src/front/glsl/lex.rs
new file mode 100644
index 0000000000..1b59a9bf3e
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/lex.rs
@@ -0,0 +1,301 @@
+use super::{
+ ast::Precision,
+ token::{Directive, DirectiveKind, Token, TokenValue},
+ types::parse_type,
+};
+use crate::{FastHashMap, Span, StorageAccess};
+use pp_rs::{
+ pp::Preprocessor,
+ token::{PreprocessorError, Punct, TokenValue as PPTokenValue},
+};
+
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct LexerResult {
+ pub kind: LexerResultKind,
+ pub meta: Span,
+}
+
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum LexerResultKind {
+ Token(Token),
+ Directive(Directive),
+ Error(PreprocessorError),
+}
+
+pub struct Lexer<'a> {
+ pp: Preprocessor<'a>,
+}
+
+impl<'a> Lexer<'a> {
+ pub fn new(input: &'a str, defines: &'a FastHashMap<String, String>) -> Self {
+ let mut pp = Preprocessor::new(input);
+ for (define, value) in defines {
+ pp.add_define(define, value).unwrap(); //TODO: handle error
+ }
+ Lexer { pp }
+ }
+}
+
+impl<'a> Iterator for Lexer<'a> {
+ type Item = LexerResult;
+ fn next(&mut self) -> Option<Self::Item> {
+ let pp_token = match self.pp.next()? {
+ Ok(t) => t,
+ Err((err, loc)) => {
+ return Some(LexerResult {
+ kind: LexerResultKind::Error(err),
+ meta: loc.into(),
+ });
+ }
+ };
+
+ let meta = pp_token.location.into();
+ let value = match pp_token.value {
+ PPTokenValue::Extension(extension) => {
+ return Some(LexerResult {
+ kind: LexerResultKind::Directive(Directive {
+ kind: DirectiveKind::Extension,
+ tokens: extension.tokens,
+ }),
+ meta,
+ })
+ }
+ PPTokenValue::Float(float) => TokenValue::FloatConstant(float),
+ PPTokenValue::Ident(ident) => {
+ match ident.as_str() {
+ // Qualifiers
+ "layout" => TokenValue::Layout,
+ "in" => TokenValue::In,
+ "out" => TokenValue::Out,
+ "uniform" => TokenValue::Uniform,
+ "buffer" => TokenValue::Buffer,
+ "shared" => TokenValue::Shared,
+ "invariant" => TokenValue::Invariant,
+ "flat" => TokenValue::Interpolation(crate::Interpolation::Flat),
+ "noperspective" => TokenValue::Interpolation(crate::Interpolation::Linear),
+ "smooth" => TokenValue::Interpolation(crate::Interpolation::Perspective),
+ "centroid" => TokenValue::Sampling(crate::Sampling::Centroid),
+ "sample" => TokenValue::Sampling(crate::Sampling::Sample),
+ "const" => TokenValue::Const,
+ "inout" => TokenValue::InOut,
+ "precision" => TokenValue::Precision,
+ "highp" => TokenValue::PrecisionQualifier(Precision::High),
+ "mediump" => TokenValue::PrecisionQualifier(Precision::Medium),
+ "lowp" => TokenValue::PrecisionQualifier(Precision::Low),
+ "restrict" => TokenValue::Restrict,
+ "readonly" => TokenValue::MemoryQualifier(StorageAccess::LOAD),
+ "writeonly" => TokenValue::MemoryQualifier(StorageAccess::STORE),
+ // values
+ "true" => TokenValue::BoolConstant(true),
+ "false" => TokenValue::BoolConstant(false),
+ // jump statements
+ "continue" => TokenValue::Continue,
+ "break" => TokenValue::Break,
+ "return" => TokenValue::Return,
+ "discard" => TokenValue::Discard,
+ // selection statements
+ "if" => TokenValue::If,
+ "else" => TokenValue::Else,
+ "switch" => TokenValue::Switch,
+ "case" => TokenValue::Case,
+ "default" => TokenValue::Default,
+ // iteration statements
+ "while" => TokenValue::While,
+ "do" => TokenValue::Do,
+ "for" => TokenValue::For,
+ // types
+ "void" => TokenValue::Void,
+ "struct" => TokenValue::Struct,
+ word => match parse_type(word) {
+ Some(t) => TokenValue::TypeName(t),
+ None => TokenValue::Identifier(String::from(word)),
+ },
+ }
+ }
+ PPTokenValue::Integer(integer) => TokenValue::IntConstant(integer),
+ PPTokenValue::Punct(punct) => match punct {
+ // Compound assignments
+ Punct::AddAssign => TokenValue::AddAssign,
+ Punct::SubAssign => TokenValue::SubAssign,
+ Punct::MulAssign => TokenValue::MulAssign,
+ Punct::DivAssign => TokenValue::DivAssign,
+ Punct::ModAssign => TokenValue::ModAssign,
+ Punct::LeftShiftAssign => TokenValue::LeftShiftAssign,
+ Punct::RightShiftAssign => TokenValue::RightShiftAssign,
+ Punct::AndAssign => TokenValue::AndAssign,
+ Punct::XorAssign => TokenValue::XorAssign,
+ Punct::OrAssign => TokenValue::OrAssign,
+
+ // Two character punctuation
+ Punct::Increment => TokenValue::Increment,
+ Punct::Decrement => TokenValue::Decrement,
+ Punct::LogicalAnd => TokenValue::LogicalAnd,
+ Punct::LogicalOr => TokenValue::LogicalOr,
+ Punct::LogicalXor => TokenValue::LogicalXor,
+ Punct::LessEqual => TokenValue::LessEqual,
+ Punct::GreaterEqual => TokenValue::GreaterEqual,
+ Punct::EqualEqual => TokenValue::Equal,
+ Punct::NotEqual => TokenValue::NotEqual,
+ Punct::LeftShift => TokenValue::LeftShift,
+ Punct::RightShift => TokenValue::RightShift,
+
+ // Parenthesis or similar
+ Punct::LeftBrace => TokenValue::LeftBrace,
+ Punct::RightBrace => TokenValue::RightBrace,
+ Punct::LeftParen => TokenValue::LeftParen,
+ Punct::RightParen => TokenValue::RightParen,
+ Punct::LeftBracket => TokenValue::LeftBracket,
+ Punct::RightBracket => TokenValue::RightBracket,
+
+ // Other one character punctuation
+ Punct::LeftAngle => TokenValue::LeftAngle,
+ Punct::RightAngle => TokenValue::RightAngle,
+ Punct::Semicolon => TokenValue::Semicolon,
+ Punct::Comma => TokenValue::Comma,
+ Punct::Colon => TokenValue::Colon,
+ Punct::Dot => TokenValue::Dot,
+ Punct::Equal => TokenValue::Assign,
+ Punct::Bang => TokenValue::Bang,
+ Punct::Minus => TokenValue::Dash,
+ Punct::Tilde => TokenValue::Tilde,
+ Punct::Plus => TokenValue::Plus,
+ Punct::Star => TokenValue::Star,
+ Punct::Slash => TokenValue::Slash,
+ Punct::Percent => TokenValue::Percent,
+ Punct::Pipe => TokenValue::VerticalBar,
+ Punct::Caret => TokenValue::Caret,
+ Punct::Ampersand => TokenValue::Ampersand,
+ Punct::Question => TokenValue::Question,
+ },
+ PPTokenValue::Pragma(pragma) => {
+ return Some(LexerResult {
+ kind: LexerResultKind::Directive(Directive {
+ kind: DirectiveKind::Pragma,
+ tokens: pragma.tokens,
+ }),
+ meta,
+ })
+ }
+ PPTokenValue::Version(version) => {
+ return Some(LexerResult {
+ kind: LexerResultKind::Directive(Directive {
+ kind: DirectiveKind::Version {
+ is_first_directive: version.is_first_directive,
+ },
+ tokens: version.tokens,
+ }),
+ meta,
+ })
+ }
+ };
+
+ Some(LexerResult {
+ kind: LexerResultKind::Token(Token { value, meta }),
+ meta,
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use pp_rs::token::{Integer, Location, Token as PPToken, TokenValue as PPTokenValue};
+
+ use super::{
+ super::token::{Directive, DirectiveKind, Token, TokenValue},
+ Lexer, LexerResult, LexerResultKind,
+ };
+ use crate::Span;
+
+ #[test]
+ fn lex_tokens() {
+ let defines = crate::FastHashMap::default();
+
+ // lex a minimal shader preceded by a version directive
+ let mut lex = Lexer::new("#version 450\nvoid main () {}", &defines);
+ let mut location = Location::default();
+ location.start = 9;
+ location.end = 12;
+ assert_eq!(
+ lex.next().unwrap(),
+ LexerResult {
+ kind: LexerResultKind::Directive(Directive {
+ kind: DirectiveKind::Version {
+ is_first_directive: true
+ },
+ tokens: vec![PPToken {
+ value: PPTokenValue::Integer(Integer {
+ signed: true,
+ value: 450,
+ width: 32
+ }),
+ location
+ }]
+ }),
+ meta: Span::new(1, 8)
+ }
+ );
+ assert_eq!(
+ lex.next().unwrap(),
+ LexerResult {
+ kind: LexerResultKind::Token(Token {
+ value: TokenValue::Void,
+ meta: Span::new(13, 17)
+ }),
+ meta: Span::new(13, 17)
+ }
+ );
+ assert_eq!(
+ lex.next().unwrap(),
+ LexerResult {
+ kind: LexerResultKind::Token(Token {
+ value: TokenValue::Identifier("main".into()),
+ meta: Span::new(18, 22)
+ }),
+ meta: Span::new(18, 22)
+ }
+ );
+ assert_eq!(
+ lex.next().unwrap(),
+ LexerResult {
+ kind: LexerResultKind::Token(Token {
+ value: TokenValue::LeftParen,
+ meta: Span::new(23, 24)
+ }),
+ meta: Span::new(23, 24)
+ }
+ );
+ assert_eq!(
+ lex.next().unwrap(),
+ LexerResult {
+ kind: LexerResultKind::Token(Token {
+ value: TokenValue::RightParen,
+ meta: Span::new(24, 25)
+ }),
+ meta: Span::new(24, 25)
+ }
+ );
+ assert_eq!(
+ lex.next().unwrap(),
+ LexerResult {
+ kind: LexerResultKind::Token(Token {
+ value: TokenValue::LeftBrace,
+ meta: Span::new(26, 27)
+ }),
+ meta: Span::new(26, 27)
+ }
+ );
+ assert_eq!(
+ lex.next().unwrap(),
+ LexerResult {
+ kind: LexerResultKind::Token(Token {
+ value: TokenValue::RightBrace,
+ meta: Span::new(27, 28)
+ }),
+ meta: Span::new(27, 28)
+ }
+ );
+ assert_eq!(lex.next(), None);
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/mod.rs b/third_party/rust/naga/src/front/glsl/mod.rs
new file mode 100644
index 0000000000..a1aa4622d3
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/mod.rs
@@ -0,0 +1,235 @@
+/*!
+Frontend for [GLSL][glsl] (OpenGL Shading Language).
+
+To begin, take a look at the documentation for the [`Frontend`].
+
+# Supported versions
+## Vulkan
+- 440 (partial)
+- 450
+- 460
+
+[glsl]: https://www.khronos.org/registry/OpenGL/index_gl.php
+*/
+
+pub use ast::{Precision, Profile};
+pub use error::{Error, ErrorKind, ExpectedToken};
+pub use token::TokenValue;
+
+use crate::{proc::Layouter, FastHashMap, FastHashSet, Handle, Module, ShaderStage, Span, Type};
+use ast::{EntryArg, FunctionDeclaration, GlobalLookup};
+use parser::ParsingContext;
+
+mod ast;
+mod builtins;
+mod constants;
+mod context;
+mod error;
+mod functions;
+mod lex;
+mod offset;
+mod parser;
+#[cfg(test)]
+mod parser_tests;
+mod token;
+mod types;
+mod variables;
+
+type Result<T> = std::result::Result<T, Error>;
+
+/// Per-shader options passed to [`parse`](Frontend::parse).
+///
+/// The [`From`](From) trait is implemented for [`ShaderStage`](ShaderStage) to
+/// provide a quick way to create an `Options` instance.
+/// ```rust
+/// # use naga::ShaderStage;
+/// # use naga::front::glsl::Options;
+/// Options::from(ShaderStage::Vertex);
+/// ```
+#[derive(Debug)]
+pub struct Options {
+ /// The shader stage in the pipeline.
+ pub stage: ShaderStage,
+ /// Preprocessor definitions to be used, akin to having
+ /// ```glsl
+ /// #define key value
+ /// ```
+ /// for each key-value pair in the map.
+ pub defines: FastHashMap<String, String>,
+}
+
+impl From<ShaderStage> for Options {
+ fn from(stage: ShaderStage) -> Self {
+ Options {
+ stage,
+ defines: FastHashMap::default(),
+ }
+ }
+}
+
+/// Additional information about the GLSL shader.
+///
+/// Stores additional information about the GLSL shader which might not be
+/// stored in the shader [`Module`](Module).
+#[derive(Debug)]
+pub struct ShaderMetadata {
+ /// The GLSL version specified in the shader through the use of the
+ /// `#version` preprocessor directive.
+ pub version: u16,
+ /// The GLSL profile specified in the shader through the use of the
+ /// `#version` preprocessor directive.
+ pub profile: Profile,
+ /// The shader stage in the pipeline, passed to the [`parse`](Frontend::parse)
+ /// method via the [`Options`](Options) struct.
+ pub stage: ShaderStage,
+
+ /// The workgroup size for compute shaders, defaults to `[1; 3]` for
+ /// compute shaders and `[0; 3]` for non-compute shaders.
+ pub workgroup_size: [u32; 3],
+ /// Whether or not early fragment tests were requested by the shader.
+ /// Defaults to `false`.
+ pub early_fragment_tests: bool,
+
+ /// The shader can request extensions via the `#extension` preprocessor
+ /// directive; the directive takes a behavior parameter that controls
+ /// whether the extension should be disabled, warn on usage, be enabled
+ /// if possible, or be required.
+ ///
+ /// This field only stores extensions that were required, or requested to
+ /// be enabled if possible, and that are supported.
+ pub extensions: FastHashSet<String>,
+}
+
+impl ShaderMetadata {
+ fn reset(&mut self, stage: ShaderStage) {
+ self.version = 0;
+ self.profile = Profile::Core;
+ self.stage = stage;
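+ // `[1; 3]` for compute shaders, `[0; 3]` for all other stages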
+ self.workgroup_size = [u32::from(stage == ShaderStage::Compute); 3];
+ self.early_fragment_tests = false;
+ self.extensions.clear();
+ }
+}
+
+impl Default for ShaderMetadata {
+ fn default() -> Self {
+ ShaderMetadata {
+ version: 0,
+ profile: Profile::Core,
+ stage: ShaderStage::Vertex,
+ workgroup_size: [0; 3],
+ early_fragment_tests: false,
+ extensions: FastHashSet::default(),
+ }
+ }
+}
+
+/// The `Frontend` is the central structure of the GLSL frontend.
+///
+/// To instantiate a new `Frontend` the [`Default`](Default) trait is used, so a
+/// call to the associated function [`Frontend::default`](Frontend::default) will
+/// return a new `Frontend` instance.
+///
+/// To parse a shader, simply call the [`parse`](Frontend::parse) method with an
+/// [`Options`](Options) struct and a [`&str`](str) holding the GLSL code.
+///
+/// The `Frontend` also provides the [`metadata`](Frontend::metadata) method to
+/// get some further information about the previously parsed shader, like the
+/// version and extensions used (see the documentation for
+/// [`ShaderMetadata`](ShaderMetadata) for all the returned information).
+///
+/// # Example usage
+/// ```rust
+/// use naga::ShaderStage;
+/// use naga::front::glsl::{Frontend, Options};
+///
+/// let glsl = r#"
+/// #version 450 core
+///
+/// void main() {}
+/// "#;
+///
+/// let mut frontend = Frontend::default();
+/// let options = Options::from(ShaderStage::Vertex);
+/// frontend.parse(&options, glsl);
+/// ```
+///
+/// # Reusability
+///
+/// If there's a need to parse more than one shader, reusing the same `Frontend`
+/// instance may be beneficial since internal allocations will be reused.
+///
+/// Calling the [`parse`](Frontend::parse) method multiple times will reset the
+/// `Frontend` so no extra care is needed when reusing.
+#[derive(Debug, Default)]
+pub struct Frontend {
+ meta: ShaderMetadata,
+
+ lookup_function: FastHashMap<String, FunctionDeclaration>,
+ lookup_type: FastHashMap<String, Handle<Type>>,
+
+ global_variables: Vec<(String, GlobalLookup)>,
+
+ entry_args: Vec<EntryArg>,
+
+ layouter: Layouter,
+
+ errors: Vec<Error>,
+
+ module: Module,
+}
+
+impl Frontend {
+ fn reset(&mut self, stage: ShaderStage) {
+ self.meta.reset(stage);
+
+ self.lookup_function.clear();
+ self.lookup_type.clear();
+ self.global_variables.clear();
+ self.entry_args.clear();
+ self.layouter.clear();
+
+ // This is necessary because if the last parse errored out, the module
+ // wouldn't have been taken
+ self.module = Module::default();
+ }
+
+ /// Parses a shader, either outputting a shader [`Module`](Module) or a
+ /// list of [`Error`](Error)s.
+ ///
+ /// Multiple calls using the same `Frontend` and different shaders are supported.
+ pub fn parse(
+ &mut self,
+ options: &Options,
+ source: &str,
+ ) -> std::result::Result<Module, Vec<Error>> {
+ self.reset(options.stage);
+
+ let lexer = lex::Lexer::new(source, &options.defines);
+ let mut ctx = ParsingContext::new(lexer);
+
+ if let Err(e) = ctx.parse(self) {
+ self.errors.push(e);
+ }
+
+ if self.errors.is_empty() {
+ Ok(std::mem::take(&mut self.module))
+ } else {
+ Err(std::mem::take(&mut self.errors))
+ }
+ }
+
+ /// Returns additional information about the parsed shader which might not be
+ /// stored in the [`Module`](Module), see the documentation for
+ /// [`ShaderMetadata`](ShaderMetadata) for more information about the
+ /// returned data.
+ ///
+ /// # Notes
+ ///
+ /// Following an unsuccessful parse, the state of the returned information
+ /// is undefined; it might contain only partial information about the
+ /// current shader, the previous shader, or both.
+ pub const fn metadata(&self) -> &ShaderMetadata {
+ &self.meta
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/offset.rs b/third_party/rust/naga/src/front/glsl/offset.rs
new file mode 100644
index 0000000000..d7950489cb
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/offset.rs
@@ -0,0 +1,173 @@
+/*!
+Module responsible for calculating the offset and span for types.
+
+There exist two types of layouts, std140 and std430 (technically there are two
+more layouts, shared and packed; shared is not supported by SPIR-V, and packed
+is implementation-dependent, so for now it's just implemented as an alias to
+std140).
+
+The OpenGL spec (the layout rules are defined by the OpenGL spec in section
+7.6.2.2, as opposed to the GLSL spec) uses the term basic machine units, which
+are equivalent to bytes.
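+
+For example, under both layouts a `float` member is 4-byte aligned (rule 1) and
+a `vec3` member is 16-byte aligned (rule 3), while a `float[]` array has a
+stride of 16 under std140 but only 4 under std430 (rule 4).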
+*/
+
+use super::{
+ ast::StructLayout,
+ error::{Error, ErrorKind},
+ Span,
+};
+use crate::{proc::Alignment, Arena, Constant, Handle, Type, TypeInner, UniqueArena};
+
+/// Struct with information needed for defining a struct member.
+///
+/// Returned by [`calculate_offset`](calculate_offset)
+#[derive(Debug)]
+pub struct TypeAlignSpan {
+ /// The handle to the type; this might be the same handle passed to
+ /// [`calculate_offset`](calculate_offset) or a new one, such as a new
+ /// array type with a different stride set.
+ pub ty: Handle<Type>,
+ /// The alignment required by the type.
+ pub align: Alignment,
+ /// The size of the type.
+ pub span: u32,
+}
+
+/// Returns the type, alignment and span of a struct member according to a [`StructLayout`](StructLayout).
+///
+/// The function returns a [`TypeAlignSpan`](TypeAlignSpan) whose `ty` member
+/// should be used as the struct member type, because, for example, arrays may
+/// have to change their stride and as such need a different type.
+pub fn calculate_offset(
+ mut ty: Handle<Type>,
+ meta: Span,
+ layout: StructLayout,
+ types: &mut UniqueArena<Type>,
+ constants: &Arena<Constant>,
+ errors: &mut Vec<Error>,
+) -> TypeAlignSpan {
+ // When using the std430 storage layout, shader storage blocks will be laid out in buffer storage
+ // identically to uniform and shader storage blocks using the std140 layout, except
+ // that the base alignment and stride of arrays of scalars and vectors in rule 4 and of
+ // structures in rule 9 are not rounded up to a multiple of the base alignment of a vec4.
+
+ let (align, span) = match types[ty].inner {
+ // 1. If the member is a scalar consuming N basic machine units,
+ // the base alignment is N.
+ TypeInner::Scalar { width, .. } => (Alignment::from_width(width), width as u32),
+ // 2. If the member is a two- or four-component vector with components
+ // consuming N basic machine units, the base alignment is 2N or 4N, respectively.
+ // 3. If the member is a three-component vector with components consuming N
+ // basic machine units, the base alignment is 4N.
+ TypeInner::Vector { size, width, .. } => (
+ Alignment::from(size) * Alignment::from_width(width),
+ size as u32 * width as u32,
+ ),
+ // 4. If the member is an array of scalars or vectors, the base alignment and array
+ // stride are set to match the base alignment of a single array element, according
+ // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4.
+ // TODO: Matrices array
+ TypeInner::Array { base, size, .. } => {
+ let info = calculate_offset(base, meta, layout, types, constants, errors);
+
+ let name = types[ty].name.clone();
+
+ // See comment at the beginning of the function
+ let (align, stride) = if StructLayout::Std430 == layout {
+ (info.align, info.align.round_up(info.span))
+ } else {
+ let align = info.align.max(Alignment::MIN_UNIFORM);
+ (align, align.round_up(info.span))
+ };
+
+ let span = match size {
+ crate::ArraySize::Constant(s) => {
+ constants[s].to_array_length().unwrap_or(1) * stride
+ }
+ crate::ArraySize::Dynamic => stride,
+ };
+
+ let ty_span = types.get_span(ty);
+ ty = types.insert(
+ Type {
+ name,
+ inner: TypeInner::Array {
+ base: info.ty,
+ size,
+ stride,
+ },
+ },
+ ty_span,
+ );
+
+ (align, span)
+ }
+ // 5. If the member is a column-major matrix with C columns and R rows, the
+ // matrix is stored identically to an array of C column vectors with R
+ // components each, according to rule (4)
+ // TODO: Row major matrices
+ TypeInner::Matrix {
+ columns,
+ rows,
+ width,
+ } => {
+ let mut align = Alignment::from(rows) * Alignment::from_width(width);
+
+ // See comment at the beginning of the function
+ if StructLayout::Std430 != layout {
+ align = align.max(Alignment::MIN_UNIFORM);
+ }
+
+ // See comment on the error kind
+ if StructLayout::Std140 == layout && rows == crate::VectorSize::Bi {
+ errors.push(Error {
+ kind: ErrorKind::UnsupportedMatrixTypeInStd140,
+ meta,
+ });
+ }
+
+ (align, align * columns as u32)
+ }
+ TypeInner::Struct { ref members, .. } => {
+ let mut span = 0;
+ let mut align = Alignment::ONE;
+ let mut members = members.clone();
+ let name = types[ty].name.clone();
+
+ for member in members.iter_mut() {
+ let info = calculate_offset(member.ty, meta, layout, types, constants, errors);
+
+ let member_alignment = info.align;
+ span = member_alignment.round_up(span);
+ align = member_alignment.max(align);
+
+ member.ty = info.ty;
+ member.offset = span;
+
+ span += info.span;
+ }
+
+ span = align.round_up(span);
+
+ let ty_span = types.get_span(ty);
+ ty = types.insert(
+ Type {
+ name,
+ inner: TypeInner::Struct { members, span },
+ },
+ ty_span,
+ );
+
+ (align, span)
+ }
+ _ => {
+ errors.push(Error {
+ kind: ErrorKind::SemanticError("Invalid struct member type".into()),
+ meta,
+ });
+ (Alignment::ONE, 0)
+ }
+ };
+
+ TypeAlignSpan { ty, align, span }
+}
diff --git a/third_party/rust/naga/src/front/glsl/parser.rs b/third_party/rust/naga/src/front/glsl/parser.rs
new file mode 100644
index 0000000000..ed139e799d
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/parser.rs
@@ -0,0 +1,448 @@
+use super::{
+ ast::{FunctionKind, Profile, TypeQualifiers},
+ context::{Context, ExprPos},
+ error::ExpectedToken,
+ error::{Error, ErrorKind},
+ lex::{Lexer, LexerResultKind},
+ token::{Directive, DirectiveKind},
+ token::{Token, TokenValue},
+ variables::{GlobalOrConstant, VarDeclaration},
+ Frontend, Result,
+};
+use crate::{arena::Handle, Block, Constant, ConstantInner, Expression, ScalarValue, Span, Type};
+use core::convert::TryFrom;
+use pp_rs::token::{PreprocessorError, Token as PPToken, TokenValue as PPTokenValue};
+use std::iter::Peekable;
+
+mod declarations;
+mod expressions;
+mod functions;
+mod types;
+
+pub struct ParsingContext<'source> {
+ lexer: Peekable<Lexer<'source>>,
+ /// Stores a token already consumed by the parser that needs to be backtracked
+ backtracked_token: Option<Token>,
+ last_meta: Span,
+}
+
+impl<'source> ParsingContext<'source> {
+ pub fn new(lexer: Lexer<'source>) -> Self {
+ ParsingContext {
+ lexer: lexer.peekable(),
+ backtracked_token: None,
+ last_meta: Span::default(),
+ }
+ }
+
+ /// Helper method for backtracking from a consumed token
+ ///
+ /// This method should always be used instead of assigning to `backtracked_token`
+ /// since it validates that backtracking hasn't occurred more than once in a row
+ ///
+ /// # Errors
+ /// - If the parser already backtracked without bumping in between
+ pub fn backtrack(&mut self, token: Token) -> Result<()> {
+ // This should never happen
+ if let Some(ref prev_token) = self.backtracked_token {
+ return Err(Error {
+ kind: ErrorKind::InternalError("The parser tried to backtrack twice in a row"),
+ meta: prev_token.meta,
+ });
+ }
+
+ self.backtracked_token = Some(token);
+
+ Ok(())
+ }
+
+ pub fn expect_ident(&mut self, frontend: &mut Frontend) -> Result<(String, Span)> {
+ let token = self.bump(frontend)?;
+
+ match token.value {
+ TokenValue::Identifier(name) => Ok((name, token.meta)),
+ _ => Err(Error {
+ kind: ErrorKind::InvalidToken(token.value, vec![ExpectedToken::Identifier]),
+ meta: token.meta,
+ }),
+ }
+ }
+
+ pub fn expect(&mut self, frontend: &mut Frontend, value: TokenValue) -> Result<Token> {
+ let token = self.bump(frontend)?;
+
+ if token.value != value {
+ Err(Error {
+ kind: ErrorKind::InvalidToken(token.value, vec![value.into()]),
+ meta: token.meta,
+ })
+ } else {
+ Ok(token)
+ }
+ }
+
+ pub fn next(&mut self, frontend: &mut Frontend) -> Option<Token> {
+ loop {
+ if let Some(token) = self.backtracked_token.take() {
+ self.last_meta = token.meta;
+ break Some(token);
+ }
+
+ let res = self.lexer.next()?;
+
+ match res.kind {
+ LexerResultKind::Token(token) => {
+ self.last_meta = token.meta;
+ break Some(token);
+ }
+ LexerResultKind::Directive(directive) => {
+ frontend.handle_directive(directive, res.meta)
+ }
+ LexerResultKind::Error(error) => frontend.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(error),
+ meta: res.meta,
+ }),
+ }
+ }
+ }
+
+ pub fn bump(&mut self, frontend: &mut Frontend) -> Result<Token> {
+ self.next(frontend).ok_or(Error {
+ kind: ErrorKind::EndOfFile,
+ meta: self.last_meta,
+ })
+ }
+
+ /// Returns `None` at the end of the file, rather than an error like other methods do
+ pub fn bump_if(&mut self, frontend: &mut Frontend, value: TokenValue) -> Option<Token> {
+ if self.peek(frontend).filter(|t| t.value == value).is_some() {
+ self.bump(frontend).ok()
+ } else {
+ None
+ }
+ }
+
+ pub fn peek(&mut self, frontend: &mut Frontend) -> Option<&Token> {
+ loop {
+ if let Some(ref token) = self.backtracked_token {
+ break Some(token);
+ }
+
+ match self.lexer.peek()?.kind {
+ LexerResultKind::Token(_) => {
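+ // Peek again to get a reference bound to `self.lexer`; the
+ // first peek was only used to check the variant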
+ let res = self.lexer.peek()?;
+
+ match res.kind {
+ LexerResultKind::Token(ref token) => break Some(token),
+ _ => unreachable!(),
+ }
+ }
+ LexerResultKind::Error(_) | LexerResultKind::Directive(_) => {
+ let res = self.lexer.next()?;
+
+ match res.kind {
+ LexerResultKind::Directive(directive) => {
+ frontend.handle_directive(directive, res.meta)
+ }
+ LexerResultKind::Error(error) => frontend.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(error),
+ meta: res.meta,
+ }),
+ LexerResultKind::Token(_) => unreachable!(),
+ }
+ }
+ }
+ }
+ }
+
+ pub fn expect_peek(&mut self, frontend: &mut Frontend) -> Result<&Token> {
+ let meta = self.last_meta;
+ self.peek(frontend).ok_or(Error {
+ kind: ErrorKind::EndOfFile,
+ meta,
+ })
+ }
+
+ pub fn parse(&mut self, frontend: &mut Frontend) -> Result<()> {
+ // Body and expression arena for global initialization
+ let mut body = Block::new();
+ let mut ctx = Context::new(frontend, &mut body);
+
+ while self.peek(frontend).is_some() {
+ self.parse_external_declaration(frontend, &mut ctx, &mut body)?;
+ }
+
+ // Add an `EntryPoint` to `frontend.module` for `main`, if a
+ // suitable overload exists. Error out if we can't find one.
+ if let Some(declaration) = frontend.lookup_function.get("main") {
+ for decl in declaration.overloads.iter() {
+ if let FunctionKind::Call(handle) = decl.kind {
+ if decl.defined && decl.parameters.is_empty() {
+ frontend.add_entry_point(handle, body, ctx.expressions);
+ return Ok(());
+ }
+ }
+ }
+ }
+
+ Err(Error {
+ kind: ErrorKind::SemanticError("Missing entry point".into()),
+ meta: Span::default(),
+ })
+ }
+
+ fn parse_uint_constant(&mut self, frontend: &mut Frontend) -> Result<(u32, Span)> {
+ let (value, meta) = self.parse_constant_expression(frontend)?;
+
+ let int = match frontend.module.constants[value].inner {
+ ConstantInner::Scalar {
+ value: ScalarValue::Uint(int),
+ ..
+ } => u32::try_from(int).map_err(|_| Error {
+ kind: ErrorKind::SemanticError("int constant overflows".into()),
+ meta,
+ })?,
+ ConstantInner::Scalar {
+ value: ScalarValue::Sint(int),
+ ..
+ } => u32::try_from(int).map_err(|_| Error {
+ kind: ErrorKind::SemanticError("int constant overflows".into()),
+ meta,
+ })?,
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::SemanticError("Expected a uint constant".into()),
+ meta,
+ })
+ }
+ };
+
+ Ok((int, meta))
+ }
+
+ fn parse_constant_expression(
+ &mut self,
+ frontend: &mut Frontend,
+ ) -> Result<(Handle<Constant>, Span)> {
+ let mut block = Block::new();
+
+ let mut ctx = Context::new(frontend, &mut block);
+
+ let mut stmt_ctx = ctx.stmt_ctx();
+ let expr = self.parse_conditional(frontend, &mut ctx, &mut stmt_ctx, &mut block, None)?;
+ let (root, meta) = ctx.lower_expect(stmt_ctx, frontend, expr, ExprPos::Rhs, &mut block)?;
+
+ Ok((frontend.solve_constant(&ctx, root, meta)?, meta))
+ }
+}
+
+impl Frontend {
+ fn handle_directive(&mut self, directive: Directive, meta: Span) {
+ let mut tokens = directive.tokens.into_iter();
+
+ match directive.kind {
+ DirectiveKind::Version { is_first_directive } => {
+ if !is_first_directive {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "#version must occur first in shader".into(),
+ ),
+ meta,
+ })
+ }
+
+ match tokens.next() {
+ Some(PPToken {
+ value: PPTokenValue::Integer(int),
+ location,
+ }) => match int.value {
+ 440 | 450 | 460 => self.meta.version = int.value as u16,
+ _ => self.errors.push(Error {
+ kind: ErrorKind::InvalidVersion(int.value),
+ meta: location.into(),
+ }),
+ },
+ Some(PPToken { value, location }) => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ value,
+ )),
+ meta: location.into(),
+ }),
+ None => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedNewLine),
+ meta,
+ }),
+ };
+
+ match tokens.next() {
+ Some(PPToken {
+ value: PPTokenValue::Ident(name),
+ location,
+ }) => match name.as_str() {
+ "core" => self.meta.profile = Profile::Core,
+ _ => self.errors.push(Error {
+ kind: ErrorKind::InvalidProfile(name),
+ meta: location.into(),
+ }),
+ },
+ Some(PPToken { value, location }) => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ value,
+ )),
+ meta: location.into(),
+ }),
+ None => {}
+ };
+
+ if let Some(PPToken { value, location }) = tokens.next() {
+ self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ value,
+ )),
+ meta: location.into(),
+ })
+ }
+ }
+ DirectiveKind::Extension => {
+ // TODO: Proper extension handling
+ // - Checking for extension support in the compiler
+ // - Handle behaviors such as warn
+ // - Handle the all extension
+ let name = match tokens.next() {
+ Some(PPToken {
+ value: PPTokenValue::Ident(name),
+ ..
+ }) => Some(name),
+ Some(PPToken { value, location }) => {
+ self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ value,
+ )),
+ meta: location.into(),
+ });
+
+ None
+ }
+ None => {
+ self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(
+ PreprocessorError::UnexpectedNewLine,
+ ),
+ meta,
+ });
+
+ None
+ }
+ };
+
+ match tokens.next() {
+ Some(PPToken {
+ value: PPTokenValue::Punct(pp_rs::token::Punct::Colon),
+ ..
+ }) => {}
+ Some(PPToken { value, location }) => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ value,
+ )),
+ meta: location.into(),
+ }),
+ None => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedNewLine),
+ meta,
+ }),
+ };
+
+ match tokens.next() {
+ Some(PPToken {
+ value: PPTokenValue::Ident(behavior),
+ location,
+ }) => match behavior.as_str() {
+ "require" | "enable" | "warn" | "disable" => {
+ if let Some(name) = name {
+ self.meta.extensions.insert(name);
+ }
+ }
+ _ => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ PPTokenValue::Ident(behavior),
+ )),
+ meta: location.into(),
+ }),
+ },
+ Some(PPToken { value, location }) => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ value,
+ )),
+ meta: location.into(),
+ }),
+ None => self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedNewLine),
+ meta,
+ }),
+ }
+
+ if let Some(PPToken { value, location }) = tokens.next() {
+ self.errors.push(Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedToken(
+ value,
+ )),
+ meta: location.into(),
+ })
+ }
+ }
+ DirectiveKind::Pragma => {
+ // TODO: handle some common pragmas?
+ }
+ }
+ }
+}
+
+pub struct DeclarationContext<'ctx, 'qualifiers> {
+ qualifiers: TypeQualifiers<'qualifiers>,
+ /// Indicates a global declaration
+ external: bool,
+
+ ctx: &'ctx mut Context,
+ body: &'ctx mut Block,
+}
+
+impl<'ctx, 'qualifiers> DeclarationContext<'ctx, 'qualifiers> {
+ fn add_var(
+ &mut self,
+ frontend: &mut Frontend,
+ ty: Handle<Type>,
+ name: String,
+ init: Option<Handle<Constant>>,
+ meta: Span,
+ ) -> Result<Handle<Expression>> {
+ let decl = VarDeclaration {
+ qualifiers: &mut self.qualifiers,
+ ty,
+ name: Some(name),
+ init,
+ meta,
+ };
+
+ match self.external {
+ true => {
+ let global = frontend.add_global_var(self.ctx, self.body, decl)?;
+ let expr = match global {
+ GlobalOrConstant::Global(handle) => Expression::GlobalVariable(handle),
+ GlobalOrConstant::Constant(handle) => Expression::Constant(handle),
+ };
+ Ok(self.ctx.add_expression(expr, meta, self.body))
+ }
+ false => frontend.add_local_var(self.ctx, self.body, decl),
+ }
+ }
+
+ /// Emits all the expressions captured by the emitter and starts the emitter again
+ ///
+ /// Alias to [`emit_restart`] with the declaration body
+ ///
+ /// [`emit_restart`]: Context::emit_restart
+ #[inline]
+ fn flush_expressions(&mut self) {
+ self.ctx.emit_restart(self.body);
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/parser/declarations.rs b/third_party/rust/naga/src/front/glsl/parser/declarations.rs
new file mode 100644
index 0000000000..9085469dda
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/parser/declarations.rs
@@ -0,0 +1,671 @@
+use crate::{
+ front::glsl::{
+ ast::{
+ GlobalLookup, GlobalLookupKind, Precision, QualifierKey, QualifierValue,
+ StorageQualifier, StructLayout, TypeQualifiers,
+ },
+ context::{Context, ExprPos},
+ error::ExpectedToken,
+ offset,
+ token::{Token, TokenValue},
+ types::scalar_components,
+ variables::{GlobalOrConstant, VarDeclaration},
+ Error, ErrorKind, Frontend, Span,
+ },
+ proc::Alignment,
+ AddressSpace, Block, Expression, FunctionResult, Handle, ScalarKind, Statement, StructMember,
+ Type, TypeInner,
+};
+
+use super::{DeclarationContext, ParsingContext, Result};
+
+/// Helper function used to retrieve the child type of `ty` at
+/// index `i`.
+///
+/// # Note
+///
+/// Does not check whether the index is valid; returns the same type
+/// when indexing a struct out of bounds or indexing a non-indexable
+/// type.
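+///
+/// For example, the child type of a `vec3` is a `float` and the child type of
+/// a `mat4` is a `vec4`.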
+fn element_or_member_type(
+ ty: Handle<Type>,
+ i: usize,
+ types: &mut crate::UniqueArena<Type>,
+) -> Handle<Type> {
+ match types[ty].inner {
+ // The child type of a vector is a scalar of the same kind and width
+ TypeInner::Vector { kind, width, .. } => types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Scalar { kind, width },
+ },
+ Default::default(),
+ ),
+ // The child type of a matrix is a vector of floats with the same
+ // width and a size equal to the matrix's row count.
+ TypeInner::Matrix { rows, width, .. } => types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Vector {
+ size: rows,
+ kind: ScalarKind::Float,
+ width,
+ },
+ },
+ Default::default(),
+ ),
+ // The child type of an array is the base type of the array
+ TypeInner::Array { base, .. } => base,
+ // The child type of a struct at index `i` is the type of its
+ // member at that same index.
+ //
+ // In case the index is out of bounds the same type is returned
+ TypeInner::Struct { ref members, .. } => {
+ members.get(i).map(|member| member.ty).unwrap_or(ty)
+ }
+ // The type isn't indexable, the same type is returned
+ _ => ty,
+ }
+}
+
+impl<'source> ParsingContext<'source> {
+ pub fn parse_external_declaration(
+ &mut self,
+ frontend: &mut Frontend,
+ global_ctx: &mut Context,
+ global_body: &mut Block,
+ ) -> Result<()> {
+ if self
+ .parse_declaration(frontend, global_ctx, global_body, true)?
+ .is_none()
+ {
+ let token = self.bump(frontend)?;
+ match token.value {
+ TokenValue::Semicolon if frontend.meta.version == 460 => Ok(()),
+ _ => {
+ let expected = match frontend.meta.version {
+ 460 => vec![TokenValue::Semicolon.into(), ExpectedToken::Eof],
+ _ => vec![ExpectedToken::Eof],
+ };
+ Err(Error {
+ kind: ErrorKind::InvalidToken(token.value, expected),
+ meta: token.meta,
+ })
+ }
+ }
+ } else {
+ Ok(())
+ }
+ }
+
+ pub fn parse_initializer(
+ &mut self,
+ frontend: &mut Frontend,
+ ty: Handle<Type>,
+ ctx: &mut Context,
+ body: &mut Block,
+ ) -> Result<(Handle<Expression>, Span)> {
+ // initializer:
+ // assignment_expression
+ // LEFT_BRACE initializer_list RIGHT_BRACE
+ // LEFT_BRACE initializer_list COMMA RIGHT_BRACE
+ //
+ // initializer_list:
+ // initializer
+ // initializer_list COMMA initializer
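+ //
+ // e.g. `vec2(1.0, 2.0)` or `{ 1.0, 2.0 }` for a variable of type `vec2`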
+ if let Some(Token { mut meta, .. }) = self.bump_if(frontend, TokenValue::LeftBrace) {
+ // initializer_list
+ let mut components = Vec::new();
+ loop {
+ // The type expected to be parsed inside the initializer list
+ let new_ty =
+ element_or_member_type(ty, components.len(), &mut frontend.module.types);
+
+ components.push(self.parse_initializer(frontend, new_ty, ctx, body)?.0);
+
+ let token = self.bump(frontend)?;
+ match token.value {
+ TokenValue::Comma => {
+ if let Some(Token { meta: end_meta, .. }) =
+ self.bump_if(frontend, TokenValue::RightBrace)
+ {
+ meta.subsume(end_meta);
+ break;
+ }
+ }
+ TokenValue::RightBrace => {
+ meta.subsume(token.meta);
+ break;
+ }
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![TokenValue::Comma.into(), TokenValue::RightBrace.into()],
+ ),
+ meta: token.meta,
+ })
+ }
+ }
+ }
+
+ Ok((
+ ctx.add_expression(Expression::Compose { ty, components }, meta, body),
+ meta,
+ ))
+ } else {
+ let mut stmt = ctx.stmt_ctx();
+ let expr = self.parse_assignment(frontend, ctx, &mut stmt, body)?;
+ let (mut init, init_meta) =
+ ctx.lower_expect(stmt, frontend, expr, ExprPos::Rhs, body)?;
+
+ let scalar_components = scalar_components(&frontend.module.types[ty].inner);
+ if let Some((kind, width)) = scalar_components {
+ ctx.implicit_conversion(frontend, &mut init, init_meta, kind, width)?;
+ }
+
+ Ok((init, init_meta))
+ }
+ }
+
+ // Note: caller preparsed the type and qualifiers
+ // Note: the caller skips this if the fallthrough token is not expected to be
+ // consumed here, so this produces Error::InvalidToken if it isn't consumed
+ pub fn parse_init_declarator_list(
+ &mut self,
+ frontend: &mut Frontend,
+ mut ty: Handle<Type>,
+ ctx: &mut DeclarationContext,
+ ) -> Result<()> {
+ // init_declarator_list:
+ // single_declaration
+ // init_declarator_list COMMA IDENTIFIER
+ // init_declarator_list COMMA IDENTIFIER array_specifier
+ // init_declarator_list COMMA IDENTIFIER array_specifier EQUAL initializer
+ // init_declarator_list COMMA IDENTIFIER EQUAL initializer
+ //
+ // single_declaration:
+ // fully_specified_type
+ // fully_specified_type IDENTIFIER
+ // fully_specified_type IDENTIFIER array_specifier
+ // fully_specified_type IDENTIFIER array_specifier EQUAL initializer
+ // fully_specified_type IDENTIFIER EQUAL initializer
+
+ // Consume any leading comma, e.g. this is valid: `float, a=1;`
+ if self
+ .peek(frontend)
+ .map_or(false, |t| t.value == TokenValue::Comma)
+ {
+ self.next(frontend);
+ }
+
+ loop {
+ let token = self.bump(frontend)?;
+ let name = match token.value {
+ TokenValue::Semicolon => break,
+ TokenValue::Identifier(name) => name,
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![ExpectedToken::Identifier, TokenValue::Semicolon.into()],
+ ),
+ meta: token.meta,
+ })
+ }
+ };
+ let mut meta = token.meta;
+
+ // array_specifier
+ // array_specifier EQUAL initializer
+ // EQUAL initializer
+
+ // parse an array specifier if it exists
+ // NOTE: unlike other parse methods this one doesn't expect an array specifier and
+ // returns Ok(None) rather than an error if there is not one
+ self.parse_array_specifier(frontend, &mut meta, &mut ty)?;
+
+ let init = self
+ .bump_if(frontend, TokenValue::Assign)
+ .map::<Result<_>, _>(|_| {
+ let (mut expr, init_meta) =
+ self.parse_initializer(frontend, ty, ctx.ctx, ctx.body)?;
+
+ let scalar_components = scalar_components(&frontend.module.types[ty].inner);
+ if let Some((kind, width)) = scalar_components {
+ ctx.ctx
+ .implicit_conversion(frontend, &mut expr, init_meta, kind, width)?;
+ }
+
+ meta.subsume(init_meta);
+
+ Ok((expr, init_meta))
+ })
+ .transpose()?;
+
+ let is_const = ctx.qualifiers.storage.0 == StorageQualifier::Const;
+ let maybe_constant = if ctx.external {
+ if let Some((root, meta)) = init {
+ match frontend.solve_constant(ctx.ctx, root, meta) {
+ Ok(res) => Some(res),
+ // If the declaration is external (global scope) and is constant qualified
+ // then the initializer must be a constant expression
+ Err(err) if is_const => return Err(err),
+ _ => None,
+ }
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ let pointer = ctx.add_var(frontend, ty, name, maybe_constant, meta)?;
+
+ if let Some((value, _)) = init.filter(|_| maybe_constant.is_none()) {
+ ctx.flush_expressions();
+ ctx.body.push(Statement::Store { pointer, value }, meta);
+ }
+
+ let token = self.bump(frontend)?;
+ match token.value {
+ TokenValue::Semicolon => break,
+ TokenValue::Comma => {}
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![TokenValue::Comma.into(), TokenValue::Semicolon.into()],
+ ),
+ meta: token.meta,
+ })
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ /// `external` indicates whether we are in a global (true) or local (false) context
+ pub fn parse_declaration(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ body: &mut Block,
+ external: bool,
+ ) -> Result<Option<Span>> {
+ //declaration:
+ // function_prototype SEMICOLON
+ //
+ // init_declarator_list SEMICOLON
+ // PRECISION precision_qualifier type_specifier SEMICOLON
+ //
+ // type_qualifier IDENTIFIER LEFT_BRACE struct_declaration_list RIGHT_BRACE SEMICOLON
+ // type_qualifier IDENTIFIER LEFT_BRACE struct_declaration_list RIGHT_BRACE IDENTIFIER SEMICOLON
+ // type_qualifier IDENTIFIER LEFT_BRACE struct_declaration_list RIGHT_BRACE IDENTIFIER array_specifier SEMICOLON
+ // type_qualifier SEMICOLON type_qualifier IDENTIFIER SEMICOLON
+ // type_qualifier IDENTIFIER identifier_list SEMICOLON
+
+ if self.peek_type_qualifier(frontend) || self.peek_type_name(frontend) {
+ let mut qualifiers = self.parse_type_qualifiers(frontend)?;
+
+ if self.peek_type_name(frontend) {
+ // This branch handles variables and function prototypes and,
+ // if external is true, also function definitions
+ let (ty, mut meta) = self.parse_type(frontend)?;
+
+ let token = self.bump(frontend)?;
+ let token_fallthrough = match token.value {
+ TokenValue::Identifier(name) => match self.expect_peek(frontend)?.value {
+ TokenValue::LeftParen => {
+ // This branch handles function definition and prototypes
+ self.bump(frontend)?;
+
+ let result = ty.map(|ty| FunctionResult { ty, binding: None });
+ let mut body = Block::new();
+
+ let mut context = Context::new(frontend, &mut body);
+
+ self.parse_function_args(frontend, &mut context, &mut body)?;
+
+ let end_meta = self.expect(frontend, TokenValue::RightParen)?.meta;
+ meta.subsume(end_meta);
+
+ let token = self.bump(frontend)?;
+ return match token.value {
+ TokenValue::Semicolon => {
+ // This branch handles function prototypes
+ frontend.add_prototype(context, name, result, meta);
+
+ Ok(Some(meta))
+ }
+ TokenValue::LeftBrace if external => {
+ // This branch handles function definitions
+ // as you can see by the guard this branch
+ // only happens if external is also true
+
+ // parse the body
+ self.parse_compound_statement(
+ token.meta,
+ frontend,
+ &mut context,
+ &mut body,
+ &mut None,
+ )?;
+
+ frontend.add_function(context, name, result, body, meta);
+
+ Ok(Some(meta))
+ }
+ _ if external => Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![
+ TokenValue::LeftBrace.into(),
+ TokenValue::Semicolon.into(),
+ ],
+ ),
+ meta: token.meta,
+ }),
+ _ => Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![TokenValue::Semicolon.into()],
+ ),
+ meta: token.meta,
+ }),
+ };
+ }
+ // Pass the token to the init_declarator_list parser
+ _ => Token {
+ value: TokenValue::Identifier(name),
+ meta: token.meta,
+ },
+ },
+ // Pass the token to the init_declarator_list parser
+ _ => token,
+ };
+
+ // If execution reaches this point the declaration is an
+ // init_declarator_list; token_fallthrough holds the token
+ // that was already bumped
+ if let Some(ty) = ty {
+ let mut ctx = DeclarationContext {
+ qualifiers,
+ external,
+ ctx,
+ body,
+ };
+
+ self.backtrack(token_fallthrough)?;
+ self.parse_init_declarator_list(frontend, ty, &mut ctx)?;
+ } else {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError("Declaration cannot have void type".into()),
+ meta,
+ })
+ }
+
+ Ok(Some(meta))
+ } else {
+ // This branch handles struct definitions and modifiers like
+ // ```glsl
+ // layout(early_fragment_tests);
+ // ```
+ let token = self.bump(frontend)?;
+ match token.value {
+ TokenValue::Identifier(ty_name) => {
+ if self.bump_if(frontend, TokenValue::LeftBrace).is_some() {
+ self.parse_block_declaration(
+ frontend,
+ ctx,
+ body,
+ &mut qualifiers,
+ ty_name,
+ token.meta,
+ )
+ .map(Some)
+ } else {
+ if qualifiers.invariant.take().is_some() {
+ frontend.make_variable_invariant(ctx, body, &ty_name, token.meta);
+
+ qualifiers.unused_errors(&mut frontend.errors);
+ self.expect(frontend, TokenValue::Semicolon)?;
+ return Ok(Some(qualifiers.span));
+ }
+
+ //TODO: declaration
+ // type_qualifier IDENTIFIER SEMICOLON
+ // type_qualifier IDENTIFIER identifier_list SEMICOLON
+ Err(Error {
+ kind: ErrorKind::NotImplemented("variable qualifier"),
+ meta: token.meta,
+ })
+ }
+ }
+ TokenValue::Semicolon => {
+ if let Some(value) =
+ qualifiers.uint_layout_qualifier("local_size_x", &mut frontend.errors)
+ {
+ frontend.meta.workgroup_size[0] = value;
+ }
+ if let Some(value) =
+ qualifiers.uint_layout_qualifier("local_size_y", &mut frontend.errors)
+ {
+ frontend.meta.workgroup_size[1] = value;
+ }
+ if let Some(value) =
+ qualifiers.uint_layout_qualifier("local_size_z", &mut frontend.errors)
+ {
+ frontend.meta.workgroup_size[2] = value;
+ }
+
+ frontend.meta.early_fragment_tests |= qualifiers
+ .none_layout_qualifier("early_fragment_tests", &mut frontend.errors);
+
+ qualifiers.unused_errors(&mut frontend.errors);
+
+ Ok(Some(qualifiers.span))
+ }
+ _ => Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![ExpectedToken::Identifier, TokenValue::Semicolon.into()],
+ ),
+ meta: token.meta,
+ }),
+ }
+ }
+ } else {
+ match self.peek(frontend).map(|t| &t.value) {
+ Some(&TokenValue::Precision) => {
+ // PRECISION precision_qualifier type_specifier SEMICOLON
+ self.bump(frontend)?;
+
+ let token = self.bump(frontend)?;
+ let _ = match token.value {
+ TokenValue::PrecisionQualifier(p) => p,
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![
+ TokenValue::PrecisionQualifier(Precision::High).into(),
+ TokenValue::PrecisionQualifier(Precision::Medium).into(),
+ TokenValue::PrecisionQualifier(Precision::Low).into(),
+ ],
+ ),
+ meta: token.meta,
+ })
+ }
+ };
+
+ let (ty, meta) = self.parse_type_non_void(frontend)?;
+
+ match frontend.module.types[ty].inner {
+ TypeInner::Scalar {
+ kind: ScalarKind::Float | ScalarKind::Sint,
+ ..
+ } => {}
+ _ => frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Precision statement can only work on floats and ints".into(),
+ ),
+ meta,
+ }),
+ }
+
+ self.expect(frontend, TokenValue::Semicolon)?;
+
+ Ok(Some(meta))
+ }
+ _ => Ok(None),
+ }
+ }
+ }
+
+ pub fn parse_block_declaration(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ body: &mut Block,
+ qualifiers: &mut TypeQualifiers,
+ ty_name: String,
+ mut meta: Span,
+ ) -> Result<Span> {
+ let layout = match qualifiers.layout_qualifiers.remove(&QualifierKey::Layout) {
+ Some((QualifierValue::Layout(l), _)) => l,
+ None => {
+ if let StorageQualifier::AddressSpace(AddressSpace::Storage { .. }) =
+ qualifiers.storage.0
+ {
+ StructLayout::Std430
+ } else {
+ StructLayout::Std140
+ }
+ }
+ _ => unreachable!(),
+ };
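+
+ // Editorial note: when no explicit layout qualifier is given, storage
+ // (`buffer`) blocks default to std430 and all other blocks to std140,
+ // e.g. `buffer B { float data[]; };` is laid out as std430.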
+
+ let mut members = Vec::new();
+ let span = self.parse_struct_declaration_list(frontend, &mut members, layout)?;
+ self.expect(frontend, TokenValue::RightBrace)?;
+
+ let mut ty = frontend.module.types.insert(
+ Type {
+ name: Some(ty_name),
+ inner: TypeInner::Struct {
+ members: members.clone(),
+ span,
+ },
+ },
+ Default::default(),
+ );
+
+ let token = self.bump(frontend)?;
+ let name = match token.value {
+ TokenValue::Semicolon => None,
+ TokenValue::Identifier(name) => {
+ self.parse_array_specifier(frontend, &mut meta, &mut ty)?;
+
+ self.expect(frontend, TokenValue::Semicolon)?;
+
+ Some(name)
+ }
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![ExpectedToken::Identifier, TokenValue::Semicolon.into()],
+ ),
+ meta: token.meta,
+ })
+ }
+ };
+
+ let global = frontend.add_global_var(
+ ctx,
+ body,
+ VarDeclaration {
+ qualifiers,
+ ty,
+ name,
+ init: None,
+ meta,
+ },
+ )?;
+
+ for (i, k, ty) in members.into_iter().enumerate().filter_map(|(i, m)| {
+ let ty = m.ty;
+ m.name.map(|s| (i as u32, s, ty))
+ }) {
+ let lookup = GlobalLookup {
+ kind: match global {
+ GlobalOrConstant::Global(handle) => GlobalLookupKind::BlockSelect(handle, i),
+ GlobalOrConstant::Constant(handle) => GlobalLookupKind::Constant(handle, ty),
+ },
+ entry_arg: None,
+ mutable: true,
+ };
+ ctx.add_global(frontend, &k, lookup, body);
+
+ frontend.global_variables.push((k, lookup));
+ }
+
+ Ok(meta)
+ }
+
+ // TODO: Accept layout arguments
+ pub fn parse_struct_declaration_list(
+ &mut self,
+ frontend: &mut Frontend,
+ members: &mut Vec<StructMember>,
+ layout: StructLayout,
+ ) -> Result<u32> {
+ let mut span = 0;
+ let mut align = Alignment::ONE;
+
+ loop {
+ // TODO: type_qualifier
+
+ let (mut ty, mut meta) = self.parse_type_non_void(frontend)?;
+ let (name, end_meta) = self.expect_ident(frontend)?;
+
+ meta.subsume(end_meta);
+
+ self.parse_array_specifier(frontend, &mut meta, &mut ty)?;
+
+ self.expect(frontend, TokenValue::Semicolon)?;
+
+ let info = offset::calculate_offset(
+ ty,
+ meta,
+ layout,
+ &mut frontend.module.types,
+ &frontend.module.constants,
+ &mut frontend.errors,
+ );
+
+ let member_alignment = info.align;
+ span = member_alignment.round_up(span);
+ align = member_alignment.max(align);
+
+ members.push(StructMember {
+ name: Some(name),
+ ty: info.ty,
+ binding: None,
+ offset: span,
+ });
+
+ span += info.span;
+
+ if let TokenValue::RightBrace = self.expect_peek(frontend)?.value {
+ break;
+ }
+ }
+
+ span = align.round_up(span);
+
+ Ok(span)
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/parser/expressions.rs b/third_party/rust/naga/src/front/glsl/parser/expressions.rs
new file mode 100644
index 0000000000..f09e58b6f6
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/parser/expressions.rs
@@ -0,0 +1,546 @@
+use crate::{
+ front::glsl::{
+ ast::{FunctionCall, FunctionCallKind, HirExpr, HirExprKind},
+ context::{Context, StmtContext},
+ error::{ErrorKind, ExpectedToken},
+ parser::ParsingContext,
+ token::{Token, TokenValue},
+ Error, Frontend, Result, Span,
+ },
+ ArraySize, BinaryOperator, Block, Constant, ConstantInner, Handle, ScalarValue, Type,
+ TypeInner, UnaryOperator,
+};
+
+impl<'source> ParsingContext<'source> {
+ pub fn parse_primary(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ ) -> Result<Handle<HirExpr>> {
+ let mut token = self.bump(frontend)?;
+
+ let (width, value) = match token.value {
+ TokenValue::IntConstant(int) => (
+ (int.width / 8) as u8,
+ if int.signed {
+ ScalarValue::Sint(int.value as i64)
+ } else {
+ ScalarValue::Uint(int.value)
+ },
+ ),
+ TokenValue::FloatConstant(float) => (
+ (float.width / 8) as u8,
+ ScalarValue::Float(float.value as f64),
+ ),
+ TokenValue::BoolConstant(value) => (1, ScalarValue::Bool(value)),
+ TokenValue::LeftParen => {
+ let expr = self.parse_expression(frontend, ctx, stmt, body)?;
+ let meta = self.expect(frontend, TokenValue::RightParen)?.meta;
+
+ token.meta.subsume(meta);
+
+ return Ok(expr);
+ }
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![
+ TokenValue::LeftParen.into(),
+ ExpectedToken::IntLiteral,
+ ExpectedToken::FloatLiteral,
+ ExpectedToken::BoolLiteral,
+ ],
+ ),
+ meta: token.meta,
+ });
+ }
+ };
+
+ let handle = frontend.module.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar { width, value },
+ },
+ token.meta,
+ );
+
+ Ok(stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Constant(handle),
+ meta: token.meta,
+ },
+ Default::default(),
+ ))
+ }
+
+ pub fn parse_function_call_args(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ meta: &mut Span,
+ ) -> Result<Vec<Handle<HirExpr>>> {
+ let mut args = Vec::new();
+ if let Some(token) = self.bump_if(frontend, TokenValue::RightParen) {
+ meta.subsume(token.meta);
+ } else {
+ loop {
+ args.push(self.parse_assignment(frontend, ctx, stmt, body)?);
+
+ let token = self.bump(frontend)?;
+ match token.value {
+ TokenValue::Comma => {}
+ TokenValue::RightParen => {
+ meta.subsume(token.meta);
+ break;
+ }
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![TokenValue::Comma.into(), TokenValue::RightParen.into()],
+ ),
+ meta: token.meta,
+ });
+ }
+ }
+ }
+ }
+
+ Ok(args)
+ }
+
+ pub fn parse_postfix(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ ) -> Result<Handle<HirExpr>> {
+ let mut base = if self.peek_type_name(frontend) {
+ let (mut handle, mut meta) = self.parse_type_non_void(frontend)?;
+
+ self.expect(frontend, TokenValue::LeftParen)?;
+ let args = self.parse_function_call_args(frontend, ctx, stmt, body, &mut meta)?;
+
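+ // Editorial note: when the constructed type is an unsized array, the
+ // array length is inferred from the number of arguments, e.g.
+ // ```glsl
+ // float[](1.0, 2.0, 3.0) // becomes float[3]
+ // ```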
+ if let TypeInner::Array {
+ size: ArraySize::Dynamic,
+ stride,
+ base,
+ } = frontend.module.types[handle].inner
+ {
+ let span = frontend.module.types.get_span(handle);
+
+ let constant = frontend.module.constants.fetch_or_append(
+ Constant {
+ name: None,
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Uint(args.len() as u64),
+ },
+ },
+ Span::default(),
+ );
+ handle = frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Array {
+ stride,
+ base,
+ size: ArraySize::Constant(constant),
+ },
+ },
+ span,
+ )
+ }
+
+ stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Call(FunctionCall {
+ kind: FunctionCallKind::TypeConstructor(handle),
+ args,
+ }),
+ meta,
+ },
+ Default::default(),
+ )
+ } else if let TokenValue::Identifier(_) = self.expect_peek(frontend)?.value {
+ let (name, mut meta) = self.expect_ident(frontend)?;
+
+ let expr = if self.bump_if(frontend, TokenValue::LeftParen).is_some() {
+ let args = self.parse_function_call_args(frontend, ctx, stmt, body, &mut meta)?;
+
+ let kind = match frontend.lookup_type.get(&name) {
+ Some(ty) => FunctionCallKind::TypeConstructor(*ty),
+ None => FunctionCallKind::Function(name),
+ };
+
+ HirExpr {
+ kind: HirExprKind::Call(FunctionCall { kind, args }),
+ meta,
+ }
+ } else {
+ let var = match frontend.lookup_variable(ctx, body, &name, meta) {
+ Some(var) => var,
+ None => {
+ return Err(Error {
+ kind: ErrorKind::UnknownVariable(name),
+ meta,
+ })
+ }
+ };
+
+ HirExpr {
+ kind: HirExprKind::Variable(var),
+ meta,
+ }
+ };
+
+ stmt.hir_exprs.append(expr, Default::default())
+ } else {
+ self.parse_primary(frontend, ctx, stmt, body)?
+ };
+
+ while let TokenValue::LeftBracket
+ | TokenValue::Dot
+ | TokenValue::Increment
+ | TokenValue::Decrement = self.expect_peek(frontend)?.value
+ {
+ let Token { value, mut meta } = self.bump(frontend)?;
+
+ match value {
+ TokenValue::LeftBracket => {
+ let index = self.parse_expression(frontend, ctx, stmt, body)?;
+ let end_meta = self.expect(frontend, TokenValue::RightBracket)?.meta;
+
+ meta.subsume(end_meta);
+ base = stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Access { base, index },
+ meta,
+ },
+ Default::default(),
+ )
+ }
+ TokenValue::Dot => {
+ let (field, end_meta) = self.expect_ident(frontend)?;
+
+ if self.bump_if(frontend, TokenValue::LeftParen).is_some() {
+ let args =
+ self.parse_function_call_args(frontend, ctx, stmt, body, &mut meta)?;
+
+ base = stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Method {
+ expr: base,
+ name: field,
+ args,
+ },
+ meta,
+ },
+ Default::default(),
+ );
+ continue;
+ }
+
+ meta.subsume(end_meta);
+ base = stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Select { base, field },
+ meta,
+ },
+ Default::default(),
+ )
+ }
+ TokenValue::Increment | TokenValue::Decrement => {
+ base = stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::PrePostfix {
+ op: match value {
+ TokenValue::Increment => crate::BinaryOperator::Add,
+ _ => crate::BinaryOperator::Subtract,
+ },
+ postfix: true,
+ expr: base,
+ },
+ meta,
+ },
+ Default::default(),
+ )
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ Ok(base)
+ }
+
+ pub fn parse_unary(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ ) -> Result<Handle<HirExpr>> {
+ Ok(match self.expect_peek(frontend)?.value {
+ TokenValue::Plus | TokenValue::Dash | TokenValue::Bang | TokenValue::Tilde => {
+ let Token { value, mut meta } = self.bump(frontend)?;
+
+ let expr = self.parse_unary(frontend, ctx, stmt, body)?;
+ let end_meta = stmt.hir_exprs[expr].meta;
+
+ let kind = match value {
+ TokenValue::Dash => HirExprKind::Unary {
+ op: UnaryOperator::Negate,
+ expr,
+ },
+ TokenValue::Bang | TokenValue::Tilde => HirExprKind::Unary {
+ op: UnaryOperator::Not,
+ expr,
+ },
+ _ => return Ok(expr),
+ };
+
+ meta.subsume(end_meta);
+ stmt.hir_exprs
+ .append(HirExpr { kind, meta }, Default::default())
+ }
+ TokenValue::Increment | TokenValue::Decrement => {
+ let Token { value, meta } = self.bump(frontend)?;
+
+ let expr = self.parse_unary(frontend, ctx, stmt, body)?;
+
+ stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::PrePostfix {
+ op: match value {
+ TokenValue::Increment => crate::BinaryOperator::Add,
+ _ => crate::BinaryOperator::Subtract,
+ },
+ postfix: false,
+ expr,
+ },
+ meta,
+ },
+ Default::default(),
+ )
+ }
+ _ => self.parse_postfix(frontend, ctx, stmt, body)?,
+ })
+ }
+
+ pub fn parse_binary(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ passthrough: Option<Handle<HirExpr>>,
+ min_bp: u8,
+ ) -> Result<Handle<HirExpr>> {
+ let mut left = passthrough
+ .ok_or(ErrorKind::EndOfFile /* Dummy error */)
+ .or_else(|_| self.parse_unary(frontend, ctx, stmt, body))?;
+ let mut meta = stmt.hir_exprs[left].meta;
+
+ while let Some((l_bp, r_bp)) = binding_power(&self.expect_peek(frontend)?.value) {
+ if l_bp < min_bp {
+ break;
+ }
+
+ let Token { value, .. } = self.bump(frontend)?;
+
+ let right = self.parse_binary(frontend, ctx, stmt, body, None, r_bp)?;
+ let end_meta = stmt.hir_exprs[right].meta;
+
+ meta.subsume(end_meta);
+ left = stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Binary {
+ left,
+ op: match value {
+ TokenValue::LogicalOr => BinaryOperator::LogicalOr,
+ TokenValue::LogicalXor => BinaryOperator::NotEqual,
+ TokenValue::LogicalAnd => BinaryOperator::LogicalAnd,
+ TokenValue::VerticalBar => BinaryOperator::InclusiveOr,
+ TokenValue::Caret => BinaryOperator::ExclusiveOr,
+ TokenValue::Ampersand => BinaryOperator::And,
+ TokenValue::Equal => BinaryOperator::Equal,
+ TokenValue::NotEqual => BinaryOperator::NotEqual,
+ TokenValue::GreaterEqual => BinaryOperator::GreaterEqual,
+ TokenValue::LessEqual => BinaryOperator::LessEqual,
+ TokenValue::LeftAngle => BinaryOperator::Less,
+ TokenValue::RightAngle => BinaryOperator::Greater,
+ TokenValue::LeftShift => BinaryOperator::ShiftLeft,
+ TokenValue::RightShift => BinaryOperator::ShiftRight,
+ TokenValue::Plus => BinaryOperator::Add,
+ TokenValue::Dash => BinaryOperator::Subtract,
+ TokenValue::Star => BinaryOperator::Multiply,
+ TokenValue::Slash => BinaryOperator::Divide,
+ TokenValue::Percent => BinaryOperator::Modulo,
+ _ => unreachable!(),
+ },
+ right,
+ },
+ meta,
+ },
+ Default::default(),
+ )
+ }
+
+ Ok(left)
+ }
+
+ pub fn parse_conditional(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ passthrough: Option<Handle<HirExpr>>,
+ ) -> Result<Handle<HirExpr>> {
+ let mut condition = self.parse_binary(frontend, ctx, stmt, body, passthrough, 0)?;
+ let mut meta = stmt.hir_exprs[condition].meta;
+
+ if self.bump_if(frontend, TokenValue::Question).is_some() {
+ let accept = self.parse_expression(frontend, ctx, stmt, body)?;
+ self.expect(frontend, TokenValue::Colon)?;
+ let reject = self.parse_assignment(frontend, ctx, stmt, body)?;
+ let end_meta = stmt.hir_exprs[reject].meta;
+
+ meta.subsume(end_meta);
+ condition = stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Conditional {
+ condition,
+ accept,
+ reject,
+ },
+ meta,
+ },
+ Default::default(),
+ )
+ }
+
+ Ok(condition)
+ }
+
+ pub fn parse_assignment(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ ) -> Result<Handle<HirExpr>> {
+ let tgt = self.parse_unary(frontend, ctx, stmt, body)?;
+ let mut meta = stmt.hir_exprs[tgt].meta;
+
+ Ok(match self.expect_peek(frontend)?.value {
+ TokenValue::Assign => {
+ self.bump(frontend)?;
+ let value = self.parse_assignment(frontend, ctx, stmt, body)?;
+ let end_meta = stmt.hir_exprs[value].meta;
+
+ meta.subsume(end_meta);
+ stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Assign { tgt, value },
+ meta,
+ },
+ Default::default(),
+ )
+ }
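+ // Editorial note: compound assignment operators are desugared into
+ // a binary expression plus a plain assignment, e.g.
+ // ```glsl
+ // a += b; // lowered as a = a + b;
+ // ```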
+ TokenValue::OrAssign
+ | TokenValue::AndAssign
+ | TokenValue::AddAssign
+ | TokenValue::DivAssign
+ | TokenValue::ModAssign
+ | TokenValue::SubAssign
+ | TokenValue::MulAssign
+ | TokenValue::LeftShiftAssign
+ | TokenValue::RightShiftAssign
+ | TokenValue::XorAssign => {
+ let token = self.bump(frontend)?;
+ let right = self.parse_assignment(frontend, ctx, stmt, body)?;
+ let end_meta = stmt.hir_exprs[right].meta;
+
+ meta.subsume(end_meta);
+ let value = stmt.hir_exprs.append(
+ HirExpr {
+ meta,
+ kind: HirExprKind::Binary {
+ left: tgt,
+ op: match token.value {
+ TokenValue::OrAssign => BinaryOperator::InclusiveOr,
+ TokenValue::AndAssign => BinaryOperator::And,
+ TokenValue::AddAssign => BinaryOperator::Add,
+ TokenValue::DivAssign => BinaryOperator::Divide,
+ TokenValue::ModAssign => BinaryOperator::Modulo,
+ TokenValue::SubAssign => BinaryOperator::Subtract,
+ TokenValue::MulAssign => BinaryOperator::Multiply,
+ TokenValue::LeftShiftAssign => BinaryOperator::ShiftLeft,
+ TokenValue::RightShiftAssign => BinaryOperator::ShiftRight,
+ TokenValue::XorAssign => BinaryOperator::ExclusiveOr,
+ _ => unreachable!(),
+ },
+ right,
+ },
+ },
+ Default::default(),
+ );
+
+ stmt.hir_exprs.append(
+ HirExpr {
+ kind: HirExprKind::Assign { tgt, value },
+ meta,
+ },
+ Default::default(),
+ )
+ }
+ _ => self.parse_conditional(frontend, ctx, stmt, body, Some(tgt))?,
+ })
+ }
+
+ pub fn parse_expression(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ stmt: &mut StmtContext,
+ body: &mut Block,
+ ) -> Result<Handle<HirExpr>> {
+ let mut expr = self.parse_assignment(frontend, ctx, stmt, body)?;
+
+ while let TokenValue::Comma = self.expect_peek(frontend)?.value {
+ self.bump(frontend)?;
+ expr = self.parse_assignment(frontend, ctx, stmt, body)?;
+ }
+
+ Ok(expr)
+ }
+}
+
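+ /// Returns the left and right binding powers of a binary operator for the
+ /// Pratt parser in `parse_binary`, or `None` if the token is not a binary
+ /// operator.
+ ///
+ /// Editorial example: in `1 + 2 * 3`, `*` binds with (21, 22) and `+` with
+ /// (19, 20); while parsing the right operand of `+` (min power 20), `*`'s
+ /// left power 21 is high enough to absorb it, giving `1 + (2 * 3)`. Using
+ /// `l_bp + 1` as the right power makes every operator left associative.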
+const fn binding_power(value: &TokenValue) -> Option<(u8, u8)> {
+ Some(match *value {
+ TokenValue::LogicalOr => (1, 2),
+ TokenValue::LogicalXor => (3, 4),
+ TokenValue::LogicalAnd => (5, 6),
+ TokenValue::VerticalBar => (7, 8),
+ TokenValue::Caret => (9, 10),
+ TokenValue::Ampersand => (11, 12),
+ TokenValue::Equal | TokenValue::NotEqual => (13, 14),
+ TokenValue::GreaterEqual
+ | TokenValue::LessEqual
+ | TokenValue::LeftAngle
+ | TokenValue::RightAngle => (15, 16),
+ TokenValue::LeftShift | TokenValue::RightShift => (17, 18),
+ TokenValue::Plus | TokenValue::Dash => (19, 20),
+ TokenValue::Star | TokenValue::Slash | TokenValue::Percent => (21, 22),
+ _ => return None,
+ })
+}
diff --git a/third_party/rust/naga/src/front/glsl/parser/functions.rs b/third_party/rust/naga/src/front/glsl/parser/functions.rs
new file mode 100644
index 0000000000..781f4cffed
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/parser/functions.rs
@@ -0,0 +1,654 @@
+use crate::front::glsl::context::ExprPos;
+use crate::front::glsl::Span;
+use crate::{
+ front::glsl::{
+ ast::ParameterQualifier,
+ context::Context,
+ parser::ParsingContext,
+ token::{Token, TokenValue},
+ variables::VarDeclaration,
+ Error, ErrorKind, Frontend, Result,
+ },
+ Block, ConstantInner, Expression, ScalarValue, Statement, SwitchCase, UnaryOperator,
+};
+
+impl<'source> ParsingContext<'source> {
+ pub fn peek_parameter_qualifier(&mut self, frontend: &mut Frontend) -> bool {
+ self.peek(frontend).map_or(false, |t| match t.value {
+ TokenValue::In | TokenValue::Out | TokenValue::InOut | TokenValue::Const => true,
+ _ => false,
+ })
+ }
+
+ /// Returns the parsed `ParameterQualifier`, defaulting to
+ /// `ParameterQualifier::In` if no qualifier is present
+ pub fn parse_parameter_qualifier(&mut self, frontend: &mut Frontend) -> ParameterQualifier {
+ if self.peek_parameter_qualifier(frontend) {
+ match self.bump(frontend).unwrap().value {
+ TokenValue::In => ParameterQualifier::In,
+ TokenValue::Out => ParameterQualifier::Out,
+ TokenValue::InOut => ParameterQualifier::InOut,
+ TokenValue::Const => ParameterQualifier::Const,
+ _ => unreachable!(),
+ }
+ } else {
+ ParameterQualifier::In
+ }
+ }
+
+ pub fn parse_statement(
+ &mut self,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ body: &mut Block,
+ terminator: &mut Option<usize>,
+ ) -> Result<Option<Span>> {
+ // Type qualifiers always identify a declaration statement
+ if self.peek_type_qualifier(frontend) {
+ return self.parse_declaration(frontend, ctx, body, false);
+ }
+
+ // A type name can introduce either a declaration statement or a type
+ // constructor, depending on whether the token following it is a `(` (LeftParen)
+ if self.peek_type_name(frontend) {
+ // Start by consuming the type name so that we can peek the token after it
+ let token = self.bump(frontend)?;
+ // Peek the next token: if it's a `(` (LeftParen) the statement is a
+ // constructor, otherwise it's a declaration. The check must happen
+ // before the `if` below because we backtrack first
+ let declaration = TokenValue::LeftParen != self.expect_peek(frontend)?.value;
+
+ self.backtrack(token)?;
+
+ if declaration {
+ return self.parse_declaration(frontend, ctx, body, false);
+ }
+ }
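+
+ // Editorial example of the disambiguation above:
+ // ```glsl
+ // vec3 v; // declaration
+ // vec3(0.0); // type constructor (expression statement)
+ // ```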
+
+ let new_break = || {
+ let mut block = Block::new();
+ block.push(Statement::Break, crate::Span::default());
+ block
+ };
+
+ let &Token {
+ ref value,
+ mut meta,
+ } = self.expect_peek(frontend)?;
+
+ let meta_rest = match *value {
+ TokenValue::Continue => {
+ let meta = self.bump(frontend)?.meta;
+ body.push(Statement::Continue, meta);
+ terminator.get_or_insert(body.len());
+ self.expect(frontend, TokenValue::Semicolon)?.meta
+ }
+ TokenValue::Break => {
+ let meta = self.bump(frontend)?.meta;
+ body.push(Statement::Break, meta);
+ terminator.get_or_insert(body.len());
+ self.expect(frontend, TokenValue::Semicolon)?.meta
+ }
+ TokenValue::Return => {
+ self.bump(frontend)?;
+ let (value, meta) = match self.expect_peek(frontend)?.value {
+ TokenValue::Semicolon => (None, self.bump(frontend)?.meta),
+ _ => {
+ // TODO: Implicit conversions
+ let mut stmt = ctx.stmt_ctx();
+ let expr = self.parse_expression(frontend, ctx, &mut stmt, body)?;
+ self.expect(frontend, TokenValue::Semicolon)?;
+ let (handle, meta) =
+ ctx.lower_expect(stmt, frontend, expr, ExprPos::Rhs, body)?;
+ (Some(handle), meta)
+ }
+ };
+
+ ctx.emit_restart(body);
+
+ body.push(Statement::Return { value }, meta);
+ terminator.get_or_insert(body.len());
+
+ meta
+ }
+ TokenValue::Discard => {
+ let meta = self.bump(frontend)?.meta;
+ body.push(Statement::Kill, meta);
+ terminator.get_or_insert(body.len());
+
+ self.expect(frontend, TokenValue::Semicolon)?.meta
+ }
+ TokenValue::If => {
+ let mut meta = self.bump(frontend)?.meta;
+
+ self.expect(frontend, TokenValue::LeftParen)?;
+ let condition = {
+ let mut stmt = ctx.stmt_ctx();
+ let expr = self.parse_expression(frontend, ctx, &mut stmt, body)?;
+ let (handle, more_meta) =
+ ctx.lower_expect(stmt, frontend, expr, ExprPos::Rhs, body)?;
+ meta.subsume(more_meta);
+ handle
+ };
+ self.expect(frontend, TokenValue::RightParen)?;
+
+ ctx.emit_restart(body);
+
+ let mut accept = Block::new();
+ if let Some(more_meta) =
+ self.parse_statement(frontend, ctx, &mut accept, &mut None)?
+ {
+ meta.subsume(more_meta)
+ }
+
+ let mut reject = Block::new();
+ if self.bump_if(frontend, TokenValue::Else).is_some() {
+ if let Some(more_meta) =
+ self.parse_statement(frontend, ctx, &mut reject, &mut None)?
+ {
+ meta.subsume(more_meta);
+ }
+ }
+
+ body.push(
+ Statement::If {
+ condition,
+ accept,
+ reject,
+ },
+ meta,
+ );
+
+ meta
+ }
+ TokenValue::Switch => {
+ let mut meta = self.bump(frontend)?.meta;
+ let end_meta;
+
+ self.expect(frontend, TokenValue::LeftParen)?;
+
+ let (selector, uint) = {
+ let mut stmt = ctx.stmt_ctx();
+ let expr = self.parse_expression(frontend, ctx, &mut stmt, body)?;
+ let (root, meta) =
+ ctx.lower_expect(stmt, frontend, expr, ExprPos::Rhs, body)?;
+ let uint = frontend.resolve_type(ctx, root, meta)?.scalar_kind()
+ == Some(crate::ScalarKind::Uint);
+ (root, uint)
+ };
+
+ self.expect(frontend, TokenValue::RightParen)?;
+
+ ctx.emit_restart(body);
+
+ let mut cases = Vec::new();
+ // Track if any default case is present in the switch statement.
+ let mut default_present = false;
+
+ self.expect(frontend, TokenValue::LeftBrace)?;
+ loop {
+ let value = match self.expect_peek(frontend)?.value {
+ TokenValue::Case => {
+ self.bump(frontend)?;
+
+ let mut stmt = ctx.stmt_ctx();
+ let expr = self.parse_expression(frontend, ctx, &mut stmt, body)?;
+ let (root, meta) =
+ ctx.lower_expect(stmt, frontend, expr, ExprPos::Rhs, body)?;
+ let constant = frontend.solve_constant(ctx, root, meta)?;
+
+ match frontend.module.constants[constant].inner {
+ ConstantInner::Scalar {
+ value: ScalarValue::Sint(int),
+ ..
+ } => match uint {
+ true => crate::SwitchValue::U32(int as u32),
+ false => crate::SwitchValue::I32(int as i32),
+ },
+ ConstantInner::Scalar {
+ value: ScalarValue::Uint(int),
+ ..
+ } => crate::SwitchValue::U32(int as u32),
+ _ => {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Case values can only be integers".into(),
+ ),
+ meta,
+ });
+
+ crate::SwitchValue::I32(0)
+ }
+ }
+ }
+ TokenValue::Default => {
+ self.bump(frontend)?;
+ default_present = true;
+ crate::SwitchValue::Default
+ }
+ TokenValue::RightBrace => {
+ end_meta = self.bump(frontend)?.meta;
+ break;
+ }
+ _ => {
+ let Token { value, meta } = self.bump(frontend)?;
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ value,
+ vec![
+ TokenValue::Case.into(),
+ TokenValue::Default.into(),
+ TokenValue::RightBrace.into(),
+ ],
+ ),
+ meta,
+ });
+ }
+ };
+
+ self.expect(frontend, TokenValue::Colon)?;
+
+ let mut body = Block::new();
+
+ let mut case_terminator = None;
+ loop {
+ match self.expect_peek(frontend)?.value {
+ TokenValue::Case | TokenValue::Default | TokenValue::RightBrace => {
+ break
+ }
+ _ => {
+ self.parse_statement(
+ frontend,
+ ctx,
+ &mut body,
+ &mut case_terminator,
+ )?;
+ }
+ }
+ }
+
+ let mut fall_through = true;
+
+ if let Some(mut idx) = case_terminator {
+ if let Statement::Break = body[idx - 1] {
+ fall_through = false;
+ idx -= 1;
+ }
+
+ body.cull(idx..)
+ }
+
+ cases.push(SwitchCase {
+ value,
+ body,
+ fall_through,
+ })
+ }
+
+ meta.subsume(end_meta);
+
+ // NOTE: do not unwrap here since a switch statement isn't required
+ // to have any cases.
+ if let Some(case) = cases.last_mut() {
+ // GLSL requires that the last case not be empty, so we check
+ // that here and produce an error otherwise (fall_through must
+ // also be checked because `break`s count as statements but
+ // they aren't added to the body)
+ if case.body.is_empty() && case.fall_through {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "last case/default label must be followed by statements".into(),
+ ),
+ meta,
+ })
+ }
+
+ // GLSL allows the last case to omit its `break` statement,
+ // which would mark it as fall through, but naga's IR requires
+ // that the last case not be fall through, so we always mark
+ // the last case as not fall through.
+ case.fall_through = false;
+ }
+
+ // Add an empty default case if none was present: naga's IR requires
+ // every switch statement to have a default case, but GLSL does not.
+ if !default_present {
+ cases.push(SwitchCase {
+ value: crate::SwitchValue::Default,
+ body: Block::new(),
+ fall_through: false,
+ })
+ }
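+
+ // Editorial sketch of the lowering above: a GLSL switch without a
+ // default gains an empty one, and a trailing case without `break`
+ // is still marked as not fall through:
+ // ```glsl
+ // switch (x) {
+ // case 0: y = 1; // falls through to case 1
+ // case 1: y = 2; // last case: no break needed
+ // } // an empty `default:` is synthesized here
+ // ```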
+
+ body.push(Statement::Switch { selector, cases }, meta);
+
+ meta
+ }
+ TokenValue::While => {
+ let mut meta = self.bump(frontend)?.meta;
+
+ let mut loop_body = Block::new();
+
+ let mut stmt = ctx.stmt_ctx();
+ self.expect(frontend, TokenValue::LeftParen)?;
+ let root = self.parse_expression(frontend, ctx, &mut stmt, &mut loop_body)?;
+ meta.subsume(self.expect(frontend, TokenValue::RightParen)?.meta);
+
+ let (expr, expr_meta) =
+ ctx.lower_expect(stmt, frontend, root, ExprPos::Rhs, &mut loop_body)?;
+ let condition = ctx.add_expression(
+ Expression::Unary {
+ op: UnaryOperator::Not,
+ expr,
+ },
+ expr_meta,
+ &mut loop_body,
+ );
+
+ ctx.emit_restart(&mut loop_body);
+
+ loop_body.push(
+ Statement::If {
+ condition,
+ accept: new_break(),
+ reject: Block::new(),
+ },
+ crate::Span::default(),
+ );
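+
+ // Editorial note: the `while` loop is lowered into naga's generic
+ // `Loop` by testing the negated condition at the top of the body;
+ // `while (c) s;` behaves like `loop { if (!c) break; s; }`.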
+
+ meta.subsume(expr_meta);
+
+ if let Some(body_meta) =
+ self.parse_statement(frontend, ctx, &mut loop_body, &mut None)?
+ {
+ meta.subsume(body_meta);
+ }
+
+ body.push(
+ Statement::Loop {
+ body: loop_body,
+ continuing: Block::new(),
+ break_if: None,
+ },
+ meta,
+ );
+
+ meta
+ }
+ TokenValue::Do => {
+ let mut meta = self.bump(frontend)?.meta;
+
+ let mut loop_body = Block::new();
+
+ let mut terminator = None;
+ self.parse_statement(frontend, ctx, &mut loop_body, &mut terminator)?;
+
+ let mut stmt = ctx.stmt_ctx();
+
+ self.expect(frontend, TokenValue::While)?;
+ self.expect(frontend, TokenValue::LeftParen)?;
+ let root = self.parse_expression(frontend, ctx, &mut stmt, &mut loop_body)?;
+ let end_meta = self.expect(frontend, TokenValue::RightParen)?.meta;
+
+ meta.subsume(end_meta);
+
+ let (expr, expr_meta) =
+ ctx.lower_expect(stmt, frontend, root, ExprPos::Rhs, &mut loop_body)?;
+ let condition = ctx.add_expression(
+ Expression::Unary {
+ op: UnaryOperator::Not,
+ expr,
+ },
+ expr_meta,
+ &mut loop_body,
+ );
+
+ ctx.emit_restart(&mut loop_body);
+
+ loop_body.push(
+ Statement::If {
+ condition,
+ accept: new_break(),
+ reject: Block::new(),
+ },
+ crate::Span::default(),
+ );
+
+ if let Some(idx) = terminator {
+ loop_body.cull(idx..)
+ }
+
+ body.push(
+ Statement::Loop {
+ body: loop_body,
+ continuing: Block::new(),
+ break_if: None,
+ },
+ meta,
+ );
+
+ meta
+ }
+ TokenValue::For => {
+ let mut meta = self.bump(frontend)?.meta;
+
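+ // Editorial note: `for (init; cond; step) s;` is lowered into `init`
+ // followed by a `Loop` whose body starts with `if (!cond) break;`
+ // and whose continuing block runs `step`; the symbol table scope
+ // keeps names declared in `init` local to the loop.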
+ ctx.symbol_table.push_scope();
+ self.expect(frontend, TokenValue::LeftParen)?;
+
+ if self.bump_if(frontend, TokenValue::Semicolon).is_none() {
+ if self.peek_type_name(frontend) || self.peek_type_qualifier(frontend) {
+ self.parse_declaration(frontend, ctx, body, false)?;
+ } else {
+ let mut stmt = ctx.stmt_ctx();
+ let expr = self.parse_expression(frontend, ctx, &mut stmt, body)?;
+ ctx.lower(stmt, frontend, expr, ExprPos::Rhs, body)?;
+ self.expect(frontend, TokenValue::Semicolon)?;
+ }
+ }
+
+ let (mut block, mut continuing) = (Block::new(), Block::new());
+
+ if self.bump_if(frontend, TokenValue::Semicolon).is_none() {
+ let (expr, expr_meta) = if self.peek_type_name(frontend)
+ || self.peek_type_qualifier(frontend)
+ {
+ let mut qualifiers = self.parse_type_qualifiers(frontend)?;
+ let (ty, mut meta) = self.parse_type_non_void(frontend)?;
+ let name = self.expect_ident(frontend)?.0;
+
+ self.expect(frontend, TokenValue::Assign)?;
+
+ let (value, end_meta) =
+ self.parse_initializer(frontend, ty, ctx, &mut block)?;
+ meta.subsume(end_meta);
+
+ let decl = VarDeclaration {
+ qualifiers: &mut qualifiers,
+ ty,
+ name: Some(name),
+ init: None,
+ meta,
+ };
+
+ let pointer = frontend.add_local_var(ctx, &mut block, decl)?;
+
+ ctx.emit_restart(&mut block);
+
+ block.push(Statement::Store { pointer, value }, meta);
+
+ (value, end_meta)
+ } else {
+ let mut stmt = ctx.stmt_ctx();
+ let root = self.parse_expression(frontend, ctx, &mut stmt, &mut block)?;
+ ctx.lower_expect(stmt, frontend, root, ExprPos::Rhs, &mut block)?
+ };
+
+ let condition = ctx.add_expression(
+ Expression::Unary {
+ op: UnaryOperator::Not,
+ expr,
+ },
+ expr_meta,
+ &mut block,
+ );
+
+ ctx.emit_restart(&mut block);
+
+ block.push(
+ Statement::If {
+ condition,
+ accept: new_break(),
+ reject: Block::new(),
+ },
+ crate::Span::default(),
+ );
+
+ self.expect(frontend, TokenValue::Semicolon)?;
+ }
+
+ match self.expect_peek(frontend)?.value {
+ TokenValue::RightParen => {}
+ _ => {
+ let mut stmt = ctx.stmt_ctx();
+ let rest =
+ self.parse_expression(frontend, ctx, &mut stmt, &mut continuing)?;
+ ctx.lower(stmt, frontend, rest, ExprPos::Rhs, &mut continuing)?;
+ }
+ }
+
+ meta.subsume(self.expect(frontend, TokenValue::RightParen)?.meta);
+
+ if let Some(stmt_meta) =
+ self.parse_statement(frontend, ctx, &mut block, &mut None)?
+ {
+ meta.subsume(stmt_meta);
+ }
+
+ body.push(
+ Statement::Loop {
+ body: block,
+ continuing,
+ break_if: None,
+ },
+ meta,
+ );
+
+ ctx.symbol_table.pop_scope();
+
+ meta
+ }
+ TokenValue::LeftBrace => {
+ let meta = self.bump(frontend)?.meta;
+
+ let mut block = Block::new();
+
+ let mut block_terminator = None;
+ let meta = self.parse_compound_statement(
+ meta,
+ frontend,
+ ctx,
+ &mut block,
+ &mut block_terminator,
+ )?;
+
+ body.push(Statement::Block(block), meta);
+ if block_terminator.is_some() {
+ terminator.get_or_insert(body.len());
+ }
+
+ meta
+ }
+ TokenValue::Semicolon => self.bump(frontend)?.meta,
+ _ => {
+ // Attempt to force expression parsing for the remaining
+ // tokens. Unknown or invalid tokens will be caught there and
+ // turned into an error.
+ let mut stmt = ctx.stmt_ctx();
+ let expr = self.parse_expression(frontend, ctx, &mut stmt, body)?;
+ ctx.lower(stmt, frontend, expr, ExprPos::Rhs, body)?;
+ self.expect(frontend, TokenValue::Semicolon)?.meta
+ }
+ };
+
+ meta.subsume(meta_rest);
+ Ok(Some(meta))
+ }
+
+ pub fn parse_compound_statement(
+ &mut self,
+ mut meta: Span,
+ frontend: &mut Frontend,
+ ctx: &mut Context,
+ body: &mut Block,
+ terminator: &mut Option<usize>,
+ ) -> Result<Span> {
+ ctx.symbol_table.push_scope();
+
+ loop {
+ if let Some(Token {
+ meta: brace_meta, ..
+ }) = self.bump_if(frontend, TokenValue::RightBrace)
+ {
+ meta.subsume(brace_meta);
+ break;
+ }
+
+ let stmt = self.parse_statement(frontend, ctx, body, terminator)?;
+
+ if let Some(stmt_meta) = stmt {
+ meta.subsume(stmt_meta);
+ }
+ }
+
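+ // Editorial note: statements recorded after a terminator (`return`,
+ // `break`, `continue`, `discard`) are unreachable and are culled
+ // from the block; e.g. in `{ return; x = 1.0; }` the assignment
+ // is dropped.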
+ if let Some(idx) = *terminator {
+ body.cull(idx..)
+ }
+
+ ctx.symbol_table.pop_scope();
+
+ Ok(meta)
+ }
+
+ pub fn parse_function_args(
+ &mut self,
+ frontend: &mut Frontend,
+ context: &mut Context,
+ body: &mut Block,
+ ) -> Result<()> {
+ if self.bump_if(frontend, TokenValue::Void).is_some() {
+ return Ok(());
+ }
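+
+ // Editorial example of the argument forms accepted below
+ // (unnamed, qualified, and array parameters):
+ // ```glsl
+ // void f(float, in vec2 v, out float o[2]);
+ // ```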
+
+ loop {
+ if self.peek_type_name(frontend) || self.peek_parameter_qualifier(frontend) {
+ let qualifier = self.parse_parameter_qualifier(frontend);
+ let mut ty = self.parse_type_non_void(frontend)?.0;
+
+ match self.expect_peek(frontend)?.value {
+ TokenValue::Comma => {
+ self.bump(frontend)?;
+ context.add_function_arg(frontend, body, None, ty, qualifier);
+ continue;
+ }
+ TokenValue::Identifier(_) => {
+ let mut name = self.expect_ident(frontend)?;
+ self.parse_array_specifier(frontend, &mut name.1, &mut ty)?;
+
+ context.add_function_arg(frontend, body, Some(name), ty, qualifier);
+
+ if self.bump_if(frontend, TokenValue::Comma).is_some() {
+ continue;
+ }
+
+ break;
+ }
+ _ => break,
+ }
+ }
+
+ break;
+ }
+
+ Ok(())
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/parser/types.rs b/third_party/rust/naga/src/front/glsl/parser/types.rs
new file mode 100644
index 0000000000..08a70669a0
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/parser/types.rs
@@ -0,0 +1,434 @@
+use crate::{
+ front::glsl::{
+ ast::{QualifierKey, QualifierValue, StorageQualifier, StructLayout, TypeQualifiers},
+ error::ExpectedToken,
+ parser::ParsingContext,
+ token::{Token, TokenValue},
+ Error, ErrorKind, Frontend, Result,
+ },
+ AddressSpace, ArraySize, Handle, Span, Type, TypeInner,
+};
+
+impl<'source> ParsingContext<'source> {
+ /// Parses zero or more array_specifiers, wrapping the type handle in the
+ /// corresponding array types when any are present
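+ ///
+ /// Illustrative example (editorial): `float x[4];` rewrites the handle
+ /// for `float` into an `array<float, 4>`, with the stride taken from
+ /// the layouter.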
+ pub fn parse_array_specifier(
+ &mut self,
+ frontend: &mut Frontend,
+ span: &mut Span,
+ ty: &mut Handle<Type>,
+ ) -> Result<()> {
+ while self.parse_array_specifier_single(frontend, span, ty)? {}
+ Ok(())
+ }
+
+ /// Implementation of [`Self::parse_array_specifier`] for a single array_specifier
+ fn parse_array_specifier_single(
+ &mut self,
+ frontend: &mut Frontend,
+ span: &mut Span,
+ ty: &mut Handle<Type>,
+ ) -> Result<bool> {
+ if self.bump_if(frontend, TokenValue::LeftBracket).is_some() {
+ let size = if let Some(Token { meta, .. }) =
+ self.bump_if(frontend, TokenValue::RightBracket)
+ {
+ span.subsume(meta);
+ ArraySize::Dynamic
+ } else {
+ let (value, constant_span) = self.parse_uint_constant(frontend)?;
+ let constant = frontend.module.constants.fetch_or_append(
+ crate::Constant {
+ name: None,
+ specialization: None,
+ inner: crate::ConstantInner::Scalar {
+ width: 4,
+ value: crate::ScalarValue::Uint(value as u64),
+ },
+ },
+ constant_span,
+ );
+ let end_span = self.expect(frontend, TokenValue::RightBracket)?.meta;
+ span.subsume(end_span);
+ ArraySize::Constant(constant)
+ };
+
+ frontend
+ .layouter
+ .update(&frontend.module.types, &frontend.module.constants)
+ .unwrap();
+ let stride = frontend.layouter[*ty].to_stride();
+ *ty = frontend.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Array {
+ base: *ty,
+ size,
+ stride,
+ },
+ },
+ *span,
+ );
+
+ Ok(true)
+ } else {
+ Ok(false)
+ }
+ }
+
+ pub fn parse_type(&mut self, frontend: &mut Frontend) -> Result<(Option<Handle<Type>>, Span)> {
+ let token = self.bump(frontend)?;
+ let mut handle = match token.value {
+ TokenValue::Void => return Ok((None, token.meta)),
+ TokenValue::TypeName(ty) => frontend.module.types.insert(ty, token.meta),
+ TokenValue::Struct => {
+ let mut meta = token.meta;
+ let ty_name = self.expect_ident(frontend)?.0;
+ self.expect(frontend, TokenValue::LeftBrace)?;
+ let mut members = Vec::new();
+ let span = self.parse_struct_declaration_list(
+ frontend,
+ &mut members,
+ StructLayout::Std140,
+ )?;
+ let end_meta = self.expect(frontend, TokenValue::RightBrace)?.meta;
+ meta.subsume(end_meta);
+ let ty = frontend.module.types.insert(
+ Type {
+ name: Some(ty_name.clone()),
+ inner: TypeInner::Struct { members, span },
+ },
+ meta,
+ );
+ frontend.lookup_type.insert(ty_name, ty);
+ ty
+ }
+ TokenValue::Identifier(ident) => match frontend.lookup_type.get(&ident) {
+ Some(ty) => *ty,
+ None => {
+ return Err(Error {
+ kind: ErrorKind::UnknownType(ident),
+ meta: token.meta,
+ })
+ }
+ },
+ _ => {
+ return Err(Error {
+ kind: ErrorKind::InvalidToken(
+ token.value,
+ vec![
+ TokenValue::Void.into(),
+ TokenValue::Struct.into(),
+ ExpectedToken::TypeName,
+ ],
+ ),
+ meta: token.meta,
+ });
+ }
+ };
+
+ let mut span = token.meta;
+ self.parse_array_specifier(frontend, &mut span, &mut handle)?;
+ Ok((Some(handle), span))
+ }
+
+ pub fn parse_type_non_void(&mut self, frontend: &mut Frontend) -> Result<(Handle<Type>, Span)> {
+ let (maybe_ty, meta) = self.parse_type(frontend)?;
+ let ty = maybe_ty.ok_or_else(|| Error {
+ kind: ErrorKind::SemanticError("Type can't be void".into()),
+ meta,
+ })?;
+
+ Ok((ty, meta))
+ }
+
+ pub fn peek_type_qualifier(&mut self, frontend: &mut Frontend) -> bool {
+ self.peek(frontend).map_or(false, |t| match t.value {
+ TokenValue::Invariant
+ | TokenValue::Interpolation(_)
+ | TokenValue::Sampling(_)
+ | TokenValue::PrecisionQualifier(_)
+ | TokenValue::Const
+ | TokenValue::In
+ | TokenValue::Out
+ | TokenValue::Uniform
+ | TokenValue::Shared
+ | TokenValue::Buffer
+ | TokenValue::Restrict
+ | TokenValue::MemoryQualifier(_)
+ | TokenValue::Layout => true,
+ _ => false,
+ })
+ }
+
+ pub fn parse_type_qualifiers<'a>(
+ &mut self,
+ frontend: &mut Frontend,
+ ) -> Result<TypeQualifiers<'a>> {
+ let mut qualifiers = TypeQualifiers::default();
+
+ while self.peek_type_qualifier(frontend) {
+ let token = self.bump(frontend)?;
+
+ // Handle layout qualifiers outside the match since this can push multiple values
+ if token.value == TokenValue::Layout {
+ self.parse_layout_qualifier_id_list(frontend, &mut qualifiers)?;
+ continue;
+ }
+
+ qualifiers.span.subsume(token.meta);
+
+ match token.value {
+ TokenValue::Invariant => {
+ if qualifiers.invariant.is_some() {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Cannot use more than one invariant qualifier per declaration"
+ .into(),
+ ),
+ meta: token.meta,
+ })
+ }
+
+ qualifiers.invariant = Some(token.meta);
+ }
+ TokenValue::Interpolation(i) => {
+ if qualifiers.interpolation.is_some() {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Cannot use more than one interpolation qualifier per declaration"
+ .into(),
+ ),
+ meta: token.meta,
+ })
+ }
+
+ qualifiers.interpolation = Some((i, token.meta));
+ }
+ TokenValue::Const
+ | TokenValue::In
+ | TokenValue::Out
+ | TokenValue::Uniform
+ | TokenValue::Shared
+ | TokenValue::Buffer => {
+ let storage = match token.value {
+ TokenValue::Const => StorageQualifier::Const,
+ TokenValue::In => StorageQualifier::Input,
+ TokenValue::Out => StorageQualifier::Output,
+ TokenValue::Uniform => {
+ StorageQualifier::AddressSpace(AddressSpace::Uniform)
+ }
+ TokenValue::Shared => {
+ StorageQualifier::AddressSpace(AddressSpace::WorkGroup)
+ }
+ TokenValue::Buffer => {
+ StorageQualifier::AddressSpace(AddressSpace::Storage {
+ access: crate::StorageAccess::all(),
+ })
+ }
+ _ => unreachable!(),
+ };
+
+ if StorageQualifier::AddressSpace(AddressSpace::Function)
+ != qualifiers.storage.0
+ {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Cannot use more than one storage qualifier per declaration".into(),
+ ),
+ meta: token.meta,
+ });
+ }
+
+ qualifiers.storage = (storage, token.meta);
+ }
+ TokenValue::Sampling(s) => {
+ if qualifiers.sampling.is_some() {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Cannot use more than one sampling qualifier per declaration"
+ .into(),
+ ),
+ meta: token.meta,
+ })
+ }
+
+ qualifiers.sampling = Some((s, token.meta));
+ }
+ TokenValue::PrecisionQualifier(p) => {
+ if qualifiers.precision.is_some() {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "Cannot use more than one precision qualifier per declaration"
+ .into(),
+ ),
+ meta: token.meta,
+ })
+ }
+
+ qualifiers.precision = Some((p, token.meta));
+ }
+ TokenValue::MemoryQualifier(access) => {
+ let storage_access = qualifiers
+ .storage_access
+ .get_or_insert((crate::StorageAccess::all(), Span::default()));
+ if !storage_access.0.contains(!access) {
+ frontend.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "The same memory qualifier can only be used once".into(),
+ ),
+ meta: token.meta,
+ })
+ }
+
+ storage_access.0 &= access;
+ storage_access.1.subsume(token.meta);
+ }
+ TokenValue::Restrict => continue,
+ _ => unreachable!(),
+ };
+ }
+
+ Ok(qualifiers)
+ }
+
+ pub fn parse_layout_qualifier_id_list(
+ &mut self,
+ frontend: &mut Frontend,
+ qualifiers: &mut TypeQualifiers,
+ ) -> Result<()> {
+ self.expect(frontend, TokenValue::LeftParen)?;
+ loop {
+ self.parse_layout_qualifier_id(frontend, &mut qualifiers.layout_qualifiers)?;
+
+ if self.bump_if(frontend, TokenValue::Comma).is_some() {
+ continue;
+ }
+
+ break;
+ }
+ let token = self.expect(frontend, TokenValue::RightParen)?;
+ qualifiers.span.subsume(token.meta);
+
+ Ok(())
+ }
+
+ pub fn parse_layout_qualifier_id(
+ &mut self,
+ frontend: &mut Frontend,
+ qualifiers: &mut crate::FastHashMap<QualifierKey, (QualifierValue, Span)>,
+ ) -> Result<()> {
+ // layout_qualifier_id:
+ // IDENTIFIER
+ // IDENTIFIER EQUAL constant_expression
+ // SHARED
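+ //
+ // Illustrative examples (editorial): `early_fragment_tests` (bare
+ // IDENTIFIER), `binding = 2` (IDENTIFIER EQUAL constant_expression),
+ // and `std140` or an image format such as `rgba8`, which get
+ // dedicated qualifier keys below.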
+ let mut token = self.bump(frontend)?;
+ match token.value {
+ TokenValue::Identifier(name) => {
+ let (key, value) = match name.as_str() {
+ "std140" => (
+ QualifierKey::Layout,
+ QualifierValue::Layout(StructLayout::Std140),
+ ),
+ "std430" => (
+ QualifierKey::Layout,
+ QualifierValue::Layout(StructLayout::Std430),
+ ),
+ word => {
+ if let Some(format) = map_image_format(word) {
+ (QualifierKey::Format, QualifierValue::Format(format))
+ } else {
+ let key = QualifierKey::String(name.into());
+ let value = if self.bump_if(frontend, TokenValue::Assign).is_some() {
+ let (value, end_meta) = match self.parse_uint_constant(frontend) {
+ Ok(v) => v,
+ Err(e) => {
+ frontend.errors.push(e);
+ (0, Span::default())
+ }
+ };
+ token.meta.subsume(end_meta);
+
+ QualifierValue::Uint(value)
+ } else {
+ QualifierValue::None
+ };
+
+ (key, value)
+ }
+ }
+ };
+
+ qualifiers.insert(key, (value, token.meta));
+ }
+ _ => frontend.errors.push(Error {
+ kind: ErrorKind::InvalidToken(token.value, vec![ExpectedToken::Identifier]),
+ meta: token.meta,
+ }),
+ }
+
+ Ok(())
+ }
+
+ pub fn peek_type_name(&mut self, frontend: &mut Frontend) -> bool {
+ self.peek(frontend).map_or(false, |t| match t.value {
+ TokenValue::TypeName(_) | TokenValue::Void => true,
+ TokenValue::Struct => true,
+ TokenValue::Identifier(ref ident) => frontend.lookup_type.contains_key(ident),
+ _ => false,
+ })
+ }
+}
+
+fn map_image_format(word: &str) -> Option<crate::StorageFormat> {
+ use crate::StorageFormat as Sf;
+
+ let format = match word {
+ // float-image-format-qualifier:
+ "rgba32f" => Sf::Rgba32Float,
+ "rgba16f" => Sf::Rgba16Float,
+ "rg32f" => Sf::Rg32Float,
+ "rg16f" => Sf::Rg16Float,
+ "r11f_g11f_b10f" => Sf::Rg11b10Float,
+ "r32f" => Sf::R32Float,
+ "r16f" => Sf::R16Float,
+ "rgba16" => Sf::Rgba16Unorm,
+ "rgb10_a2" => Sf::Rgb10a2Unorm,
+ "rgba8" => Sf::Rgba8Unorm,
+ "rg16" => Sf::Rg16Unorm,
+ "rg8" => Sf::Rg8Unorm,
+ "r16" => Sf::R16Unorm,
+ "r8" => Sf::R8Unorm,
+ "rgba16_snorm" => Sf::Rgba16Snorm,
+ "rgba8_snorm" => Sf::Rgba8Snorm,
+ "rg16_snorm" => Sf::Rg16Snorm,
+ "rg8_snorm" => Sf::Rg8Snorm,
+ "r16_snorm" => Sf::R16Snorm,
+ "r8_snorm" => Sf::R8Snorm,
+ // int-image-format-qualifier:
+ "rgba32i" => Sf::Rgba32Sint,
+ "rgba16i" => Sf::Rgba16Sint,
+ "rgba8i" => Sf::Rgba8Sint,
+ "rg32i" => Sf::Rg32Sint,
+ "rg16i" => Sf::Rg16Sint,
+ "rg8i" => Sf::Rg8Sint,
+ "r32i" => Sf::R32Sint,
+ "r16i" => Sf::R16Sint,
+ "r8i" => Sf::R8Sint,
+ // uint-image-format-qualifier:
+ "rgba32ui" => Sf::Rgba32Uint,
+ "rgba16ui" => Sf::Rgba16Uint,
+ "rgba8ui" => Sf::Rgba8Uint,
+ "rg32ui" => Sf::Rg32Uint,
+ "rg16ui" => Sf::Rg16Uint,
+ "rg8ui" => Sf::Rg8Uint,
+ "r32ui" => Sf::R32Uint,
+ "r16ui" => Sf::R16Uint,
+ "r8ui" => Sf::R8Uint,
+ // TODO: These next ones seem incorrect to me
+ // "rgb10_a2ui" => Sf::Rgb10a2Unorm,
+ _ => return None,
+ };
+
+ Some(format)
+}
diff --git a/third_party/rust/naga/src/front/glsl/parser_tests.rs b/third_party/rust/naga/src/front/glsl/parser_tests.rs
new file mode 100644
index 0000000000..ae529a6fbe
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/parser_tests.rs
@@ -0,0 +1,832 @@
+use super::{
+ ast::Profile,
+ error::ExpectedToken,
+ error::{Error, ErrorKind},
+ token::TokenValue,
+ Frontend, Options, Span,
+};
+use crate::ShaderStage;
+use pp_rs::token::PreprocessorError;
+
+#[test]
+fn version() {
+ let mut frontend = Frontend::default();
+
+ // invalid versions
+ assert_eq!(
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ "#version 99000\n void main(){}",
+ )
+ .err()
+ .unwrap(),
+ vec![Error {
+ kind: ErrorKind::InvalidVersion(99000),
+ meta: Span::new(9, 14)
+ }],
+ );
+
+ assert_eq!(
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ "#version 449\n void main(){}",
+ )
+ .err()
+ .unwrap(),
+ vec![Error {
+ kind: ErrorKind::InvalidVersion(449),
+ meta: Span::new(9, 12)
+ }]
+ );
+
+ assert_eq!(
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ "#version 450 smart\n void main(){}",
+ )
+ .err()
+ .unwrap(),
+ vec![Error {
+ kind: ErrorKind::InvalidProfile("smart".into()),
+ meta: Span::new(13, 18),
+ }]
+ );
+
+ assert_eq!(
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ "#version 450\nvoid main(){} #version 450",
+ )
+ .err()
+ .unwrap(),
+ vec![
+ Error {
+ kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedHash,),
+ meta: Span::new(27, 28),
+ },
+ Error {
+ kind: ErrorKind::InvalidToken(
+ TokenValue::Identifier("version".into()),
+ vec![ExpectedToken::Eof]
+ ),
+ meta: Span::new(28, 35)
+ }
+ ]
+ );
+
+ // valid versions
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ " # version 450\nvoid main() {}",
+ )
+ .unwrap();
+ assert_eq!(
+ (frontend.metadata().version, frontend.metadata().profile),
+ (450, Profile::Core)
+ );
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ "#version 450\nvoid main() {}",
+ )
+ .unwrap();
+ assert_eq!(
+ (frontend.metadata().version, frontend.metadata().profile),
+ (450, Profile::Core)
+ );
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ "#version 450 core\nvoid main(void) {}",
+ )
+ .unwrap();
+ assert_eq!(
+ (frontend.metadata().version, frontend.metadata().profile),
+ (450, Profile::Core)
+ );
+}
+
+#[test]
+fn control_flow() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ if (true) {
+ return 1;
+ } else {
+ return 2;
+ }
+ }
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ if (true) {
+ return 1;
+ }
+ }
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ int x;
+ int y = 3;
+ switch (5) {
+ case 2:
+ x = 2;
+ case 5:
+ x = 5;
+ y = 2;
+ break;
+ default:
+ x = 0;
+ }
+ }
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ int x = 0;
+ while(x < 5) {
+ x = x + 1;
+ }
+ do {
+ x = x - 1;
+ } while(x >= 4)
+ }
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ int x = 0;
+ for(int i = 0; i < 10;) {
+ x = x + 2;
+ }
+ for(;;);
+ return x;
+ }
+ "#,
+ )
+ .unwrap();
+}
+
+#[test]
+fn declarations() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ layout(location = 0) in vec2 v_uv;
+ layout(location = 0) out vec4 o_color;
+ layout(set = 1, binding = 1) uniform texture2D tex;
+ layout(set = 1, binding = 2) uniform sampler tex_sampler;
+
+ layout(early_fragment_tests) in;
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ layout(std140, set = 2, binding = 0)
+ uniform u_locals {
+ vec3 model_offs;
+ float load_time;
+ ivec4 atlas_offs;
+ };
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ layout(push_constant)
+ uniform u_locals {
+ vec3 model_offs;
+ float load_time;
+ ivec4 atlas_offs;
+ };
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ layout(std430, set = 2, binding = 0)
+ uniform u_locals {
+ vec3 model_offs;
+ float load_time;
+ ivec4 atlas_offs;
+ };
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ layout(std140, set = 2, binding = 0)
+ uniform u_locals {
+ vec3 model_offs;
+ float load_time;
+ } block_var;
+
+ void main() {
+ load_time * model_offs;
+ block_var.load_time * block_var.model_offs;
+ }
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ float vector = vec4(1.0 / 17.0, 9.0 / 17.0, 3.0 / 17.0, 11.0 / 17.0);
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ precision highp float;
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+}
+
+#[test]
+fn textures() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ #version 450
+ layout(location = 0) in vec2 v_uv;
+ layout(location = 0) out vec4 o_color;
+ layout(set = 1, binding = 1) uniform texture2D tex;
+ layout(set = 1, binding = 2) uniform sampler tex_sampler;
+ void main() {
+ o_color = texture(sampler2D(tex, tex_sampler), v_uv);
+ o_color.a = texture(sampler2D(tex, tex_sampler), v_uv, 2.0).a;
+ }
+ "#,
+ )
+ .unwrap();
+}
+
+#[test]
+fn functions() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void test1(float);
+ void test1(float) {}
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void test2(float a) {}
+ void test3(float a, float b) {}
+ void test4(float, float) {}
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ float test(float a) { return a; }
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ float test(vec4 p) {
+ return p.x;
+ }
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ // Function overloading
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ float test(vec2 p);
+ float test(vec3 p);
+ float test(vec4 p);
+
+ float test(vec2 p) {
+ return p.x;
+ }
+
+ float test(vec3 p) {
+ return p.x;
+ }
+
+ float test(vec4 p) {
+ return p.x;
+ }
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ assert_eq!(
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ int test(vec4 p) {
+ return p.x;
+ }
+
+ float test(vec4 p) {
+ return p.x;
+ }
+
+ void main() {}
+ "#,
+ )
+ .err()
+ .unwrap(),
+ vec![Error {
+ kind: ErrorKind::SemanticError("Function already defined".into()),
+ meta: Span::new(134, 152),
+ }]
+ );
+
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ float callee(uint q) {
+ return float(q);
+ }
+
+ float caller() {
+ callee(1u);
+ }
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ // Nested function call
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ layout(set = 0, binding = 1) uniform texture2D t_noise;
+ layout(set = 0, binding = 2) uniform sampler s_noise;
+
+ void main() {
+ textureLod(sampler2D(t_noise, s_noise), vec2(1.0), 0);
+ }
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void fun(vec2 in_parameter, out float out_parameter) {
+ ivec2 _ = ivec2(in_parameter);
+ }
+
+ void main() {
+ float a;
+ fun(vec2(1.0), a);
+ }
+ "#,
+ )
+ .unwrap();
+}
+
+#[test]
+fn constants() {
+ use crate::{Constant, ConstantInner, ScalarValue};
+ let mut frontend = Frontend::default();
+
+ let module = frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ const float a = 1.0;
+ float global = a;
+ const float b = a;
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ let mut constants = module.constants.iter();
+
+ assert_eq!(
+ constants.next().unwrap().1,
+ &Constant {
+ name: Some("a".to_owned()),
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(1.0)
+ }
+ }
+ );
+ assert_eq!(
+ constants.next().unwrap().1,
+ &Constant {
+ name: Some("b".to_owned()),
+ specialization: None,
+ inner: ConstantInner::Scalar {
+ width: 4,
+ value: ScalarValue::Float(1.0)
+ }
+ }
+ );
+
+ assert!(constants.next().is_none());
+}
+
+#[test]
+fn function_overloading() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+
+ float saturate(float v) { return clamp(v, 0.0, 1.0); }
+ vec2 saturate(vec2 v) { return clamp(v, vec2(0.0), vec2(1.0)); }
+ vec3 saturate(vec3 v) { return clamp(v, vec3(0.0), vec3(1.0)); }
+ vec4 saturate(vec4 v) { return clamp(v, vec4(0.0), vec4(1.0)); }
+
+ void main() {
+ float v1 = saturate(1.5);
+ vec2 v2 = saturate(vec2(0.5, 1.5));
+ vec3 v3 = saturate(vec3(0.5, 1.5, 2.5));
+ vec4 v4 = saturate(vec4(0.5, 1.5, 2.5, 3.5));
+ }
+ "#,
+ )
+ .unwrap();
+}
+
+#[test]
+fn implicit_conversions() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ mat4 a = mat4(1);
+ float b = 1u;
+ float c = 1 + 2.0;
+ }
+ "#,
+ )
+ .unwrap();
+
+ assert_eq!(
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void test(int a) {}
+ void test(uint a) {}
+
+ void main() {
+ test(1.0);
+ }
+ "#,
+ )
+ .err()
+ .unwrap(),
+ vec![Error {
+ kind: ErrorKind::SemanticError("Unknown function \'test\'".into()),
+ meta: Span::new(156, 165),
+ }]
+ );
+
+ assert_eq!(
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void test(float a) {}
+ void test(uint a) {}
+
+ void main() {
+ test(1);
+ }
+ "#,
+ )
+ .err()
+ .unwrap(),
+ vec![Error {
+ kind: ErrorKind::SemanticError("Ambiguous best function for \'test\'".into()),
+ meta: Span::new(158, 165),
+ }]
+ );
+}
+
+#[test]
+fn structs() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ Test {
+ vec4 pos;
+ } xx;
+
+ void main() {}
+ "#,
+ )
+ .unwrap_err();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ struct Test {
+ vec4 pos;
+ };
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ const int NUM_VECS = 42;
+ struct Test {
+ vec4 vecs[NUM_VECS];
+ };
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ struct Hello {
+ vec4 test;
+ } test() {
+ return Hello( vec4(1.0) );
+ }
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ struct Test {};
+
+ void main() {}
+ "#,
+ )
+ .unwrap_err();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ inout struct Test {
+ vec4 x;
+ };
+
+ void main() {}
+ "#,
+ )
+ .unwrap_err();
+}
+
+#[test]
+fn swizzles() {
+ let mut frontend = Frontend::default();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ vec4 v = vec4(1);
+ v.xyz = vec3(2);
+ v.x = 5.0;
+ v.xyz.zxy.yx.xy = vec2(5.0, 1.0);
+ }
+ "#,
+ )
+ .unwrap();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ vec4 v = vec4(1);
+ v.xx = vec2(5.0);
+ }
+ "#,
+ )
+ .unwrap_err();
+
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ vec3 v = vec3(1);
+ v.w = 2.0;
+ }
+ "#,
+ )
+ .unwrap_err();
+}
+
+#[test]
+fn expressions() {
+ let mut frontend = Frontend::default();
+
+ // Vector indexing
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ float test(int index) {
+ vec4 v = vec4(1.0, 2.0, 3.0, 4.0);
+ return v[index] + 1.0;
+ }
+
+ void main() {}
+ "#,
+ )
+ .unwrap();
+
+ // Prefix increment/decrement
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ uint index = 0;
+
+ --index;
+ ++index;
+ }
+ "#,
+ )
+ .unwrap();
+
+ // Dynamic indexing of array
+ frontend
+ .parse(
+ &Options::from(ShaderStage::Vertex),
+ r#"
+ # version 450
+ void main() {
+ const vec4 positions[1] = { vec4(0) };
+
+ gl_Position = positions[gl_VertexIndex];
+ }
+ "#,
+ )
+ .unwrap();
+}
diff --git a/third_party/rust/naga/src/front/glsl/token.rs b/third_party/rust/naga/src/front/glsl/token.rs
new file mode 100644
index 0000000000..74f10cbf85
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/token.rs
@@ -0,0 +1,137 @@
+pub use pp_rs::token::{Float, Integer, Location, PreprocessorError, Token as PPToken};
+
+use super::ast::Precision;
+use crate::{Interpolation, Sampling, Span, Type};
+
+impl From<Location> for Span {
+ fn from(loc: Location) -> Self {
+ Span::new(loc.start, loc.end)
+ }
+}
+
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct Token {
+ pub value: TokenValue,
+ pub meta: Span,
+}
+
+/// A token produced by the lexer and consumed by the parser.
+///
+/// This type is exported since it's returned in the
+/// [`InvalidToken`](super::ErrorKind::InvalidToken) error.
+#[derive(Debug, PartialEq)]
+pub enum TokenValue {
+ Identifier(String),
+
+ FloatConstant(Float),
+ IntConstant(Integer),
+ BoolConstant(bool),
+
+ Layout,
+ In,
+ Out,
+ InOut,
+ Uniform,
+ Buffer,
+ Const,
+ Shared,
+
+ Restrict,
+ /// A `glsl` memory qualifier such as `writeonly`
+ ///
+ /// The associated [`crate::StorageAccess`] is the access being allowed
+ /// (for example `writeonly` has an associated value of [`crate::StorageAccess::STORE`])
+ MemoryQualifier(crate::StorageAccess),
+
+ Invariant,
+ Interpolation(Interpolation),
+ Sampling(Sampling),
+ Precision,
+ PrecisionQualifier(Precision),
+
+ Continue,
+ Break,
+ Return,
+ Discard,
+
+ If,
+ Else,
+ Switch,
+ Case,
+ Default,
+ While,
+ Do,
+ For,
+
+ Void,
+ Struct,
+ TypeName(Type),
+
+ Assign,
+ AddAssign,
+ SubAssign,
+ MulAssign,
+ DivAssign,
+ ModAssign,
+ LeftShiftAssign,
+ RightShiftAssign,
+ AndAssign,
+ XorAssign,
+ OrAssign,
+
+ Increment,
+ Decrement,
+
+ LogicalOr,
+ LogicalAnd,
+ LogicalXor,
+
+ LessEqual,
+ GreaterEqual,
+ Equal,
+ NotEqual,
+
+ LeftShift,
+ RightShift,
+
+ LeftBrace,
+ RightBrace,
+ LeftParen,
+ RightParen,
+ LeftBracket,
+ RightBracket,
+ LeftAngle,
+ RightAngle,
+
+ Comma,
+ Semicolon,
+ Colon,
+ Dot,
+ Bang,
+ Dash,
+ Tilde,
+ Plus,
+ Star,
+ Slash,
+ Percent,
+ VerticalBar,
+ Caret,
+ Ampersand,
+ Question,
+}
+
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct Directive {
+ pub kind: DirectiveKind,
+ pub tokens: Vec<PPToken>,
+}
+
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum DirectiveKind {
+ Version { is_first_directive: bool },
+ Extension,
+ Pragma,
+}
diff --git a/third_party/rust/naga/src/front/glsl/types.rs b/third_party/rust/naga/src/front/glsl/types.rs
new file mode 100644
index 0000000000..a7967848d5
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/types.rs
@@ -0,0 +1,335 @@
+use super::{
+ constants::ConstantSolver, context::Context, Error, ErrorKind, Frontend, Result, Span,
+};
+use crate::{
+ proc::ResolveContext, Bytes, Constant, Expression, Handle, ImageClass, ImageDimension,
+ ScalarKind, Type, TypeInner, VectorSize,
+};
+
+pub fn parse_type(type_name: &str) -> Option<Type> {
+ match type_name {
+ "bool" => Some(Type {
+ name: None,
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Bool,
+ width: crate::BOOL_WIDTH,
+ },
+ }),
+ "float" => Some(Type {
+ name: None,
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Float,
+ width: 4,
+ },
+ }),
+ "double" => Some(Type {
+ name: None,
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Float,
+ width: 8,
+ },
+ }),
+ "int" => Some(Type {
+ name: None,
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Sint,
+ width: 4,
+ },
+ }),
+ "uint" => Some(Type {
+ name: None,
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Uint,
+ width: 4,
+ },
+ }),
+ "sampler" | "samplerShadow" => Some(Type {
+ name: None,
+ inner: TypeInner::Sampler {
+ comparison: type_name == "samplerShadow",
+ },
+ }),
+ word => {
+ fn kind_width_parse(ty: &str) -> Option<(ScalarKind, u8)> {
+ Some(match ty {
+ "" => (ScalarKind::Float, 4),
+ "b" => (ScalarKind::Bool, crate::BOOL_WIDTH),
+ "i" => (ScalarKind::Sint, 4),
+ "u" => (ScalarKind::Uint, 4),
+ "d" => (ScalarKind::Float, 8),
+ _ => return None,
+ })
+ }
+
+ fn size_parse(n: &str) -> Option<VectorSize> {
+ Some(match n {
+ "2" => VectorSize::Bi,
+ "3" => VectorSize::Tri,
+ "4" => VectorSize::Quad,
+ _ => return None,
+ })
+ }
+
+ let vec_parse = |word: &str| {
+ let mut iter = word.split("vec");
+
+ let kind = iter.next()?;
+ let size = iter.next()?;
+ let (kind, width) = kind_width_parse(kind)?;
+ let size = size_parse(size)?;
+
+ Some(Type {
+ name: None,
+ inner: TypeInner::Vector { size, kind, width },
+ })
+ };
+
+ let mat_parse = |word: &str| {
+ let mut iter = word.split("mat");
+
+ let kind = iter.next()?;
+ let size = iter.next()?;
+ let (_, width) = kind_width_parse(kind)?;
+
+ let (columns, rows) = if let Some(size) = size_parse(size) {
+ (size, size)
+ } else {
+ let mut iter = size.split('x');
+ match (iter.next()?, iter.next()?, iter.next()) {
+ (col, row, None) => (size_parse(col)?, size_parse(row)?),
+ _ => return None,
+ }
+ };
+
+ Some(Type {
+ name: None,
+ inner: TypeInner::Matrix {
+ columns,
+ rows,
+ width,
+ },
+ })
+ };
+
+ let texture_parse = |word: &str| {
+ let mut iter = word.split("texture");
+
+ let texture_kind = |ty| {
+ Some(match ty {
+ "" => ScalarKind::Float,
+ "i" => ScalarKind::Sint,
+ "u" => ScalarKind::Uint,
+ _ => return None,
+ })
+ };
+
+ let kind = iter.next()?;
+ let size = iter.next()?;
+ let kind = texture_kind(kind)?;
+
+ let sampled = |multi| ImageClass::Sampled { kind, multi };
+
+ let (dim, arrayed, class) = match size {
+ "1D" => (ImageDimension::D1, false, sampled(false)),
+ "1DArray" => (ImageDimension::D1, true, sampled(false)),
+ "2D" => (ImageDimension::D2, false, sampled(false)),
+ "2DArray" => (ImageDimension::D2, true, sampled(false)),
+ "2DMS" => (ImageDimension::D2, false, sampled(true)),
+ "2DMSArray" => (ImageDimension::D2, true, sampled(true)),
+ "3D" => (ImageDimension::D3, false, sampled(false)),
+ "Cube" => (ImageDimension::Cube, false, sampled(false)),
+ "CubeArray" => (ImageDimension::Cube, true, sampled(false)),
+ _ => return None,
+ };
+
+ Some(Type {
+ name: None,
+ inner: TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ },
+ })
+ };
+
+ let image_parse = |word: &str| {
+ let mut iter = word.split("image");
+
+ let texture_kind = |ty| {
+ Some(match ty {
+ "" => ScalarKind::Float,
+ "i" => ScalarKind::Sint,
+ "u" => ScalarKind::Uint,
+ _ => return None,
+ })
+ };
+
+ let kind = iter.next()?;
+ let size = iter.next()?;
+ // TODO: Check that the texture format and the kind match
+ let _ = texture_kind(kind)?;
+
+ let class = ImageClass::Storage {
+ format: crate::StorageFormat::R8Uint,
+ access: crate::StorageAccess::all(),
+ };
+
+ // TODO: glsl supports multisampled storage images, naga doesn't
+ let (dim, arrayed) = match size {
+ "1D" => (ImageDimension::D1, false),
+ "1DArray" => (ImageDimension::D1, true),
+ "2D" => (ImageDimension::D2, false),
+ "2DArray" => (ImageDimension::D2, true),
+ "3D" => (ImageDimension::D3, false),
+ // Naga doesn't support cube images, and their usefulness
+ // is questionable, so they won't be supported for now
+ // "Cube" => (ImageDimension::Cube, false),
+ // "CubeArray" => (ImageDimension::Cube, true),
+ _ => return None,
+ };
+
+ Some(Type {
+ name: None,
+ inner: TypeInner::Image {
+ dim,
+ arrayed,
+ class,
+ },
+ })
+ };
+
+ vec_parse(word)
+ .or_else(|| mat_parse(word))
+ .or_else(|| texture_parse(word))
+ .or_else(|| image_parse(word))
+ }
+ }
+}
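+
+// For illustration, the parsers above resolve, for example:
+//   "vec3"            -> TypeInner::Vector { size: Tri, kind: Float, width: 4 }
+//   "dmat4"           -> TypeInner::Matrix { columns: Quad, rows: Quad, width: 8 }
+//   "utexture2DArray" -> TypeInner::Image { dim: D2, arrayed: true,
+//                            class: Sampled { kind: Uint, multi: false } }
+// while unrecognized names fall through all four parsers and yield `None`.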
+
+pub const fn scalar_components(ty: &TypeInner) -> Option<(ScalarKind, Bytes)> {
+ match *ty {
+ TypeInner::Scalar { kind, width } => Some((kind, width)),
+ TypeInner::Vector { kind, width, .. } => Some((kind, width)),
+ TypeInner::Matrix { width, .. } => Some((ScalarKind::Float, width)),
+ TypeInner::ValuePointer { kind, width, .. } => Some((kind, width)),
+ _ => None,
+ }
+}
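+
+// For illustration: a `vec3` yields Some((ScalarKind::Float, 4)) and a
+// `dmat2` yields Some((ScalarKind::Float, 8)), while composite types such
+// as structs, arrays and images have no single scalar component and yield
+// `None`.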
+
+pub const fn type_power(kind: ScalarKind, width: Bytes) -> Option<u32> {
+ Some(match kind {
+ ScalarKind::Sint => 0,
+ ScalarKind::Uint => 1,
+ ScalarKind::Float if width == 4 => 2,
+ ScalarKind::Float => 3,
+ ScalarKind::Bool => return None,
+ })
+}
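+
+// For illustration: the resulting ranking is Sint (0) < Uint (1) < 32-bit
+// Float (2) < 64-bit Float (3), so an implicit conversion such as int ->
+// float always moves to a strictly greater power, and bool (`None`) never
+// participates in implicit conversions.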
+
+impl Frontend {
+ /// Resolves the types of the expressions up to `expr` (inclusive)
+ ///
+ /// This needs to be done before the [`typifier`] can be queried for
+ /// the types of the expressions in the range between the last grow and `expr`.
+ ///
+ /// # Note
+ ///
+ /// The `resolve_type*` methods (like [`resolve_type`]) automatically
+ /// grow the [`typifier`] so calling this method is not necessary when using
+ /// them.
+ ///
+ /// [`typifier`]: Context::typifier
+ /// [`resolve_type`]: Self::resolve_type
+ pub(crate) fn typifier_grow(
+ &self,
+ ctx: &mut Context,
+ expr: Handle<Expression>,
+ meta: Span,
+ ) -> Result<()> {
+ let resolve_ctx = ResolveContext::with_locals(&self.module, &ctx.locals, &ctx.arguments);
+
+ ctx.typifier
+ .grow(expr, &ctx.expressions, &resolve_ctx)
+ .map_err(|error| Error {
+ kind: ErrorKind::SemanticError(format!("Can't resolve type: {error:?}").into()),
+ meta,
+ })
+ }
+
+ /// Gets the type for the result of the `expr` expression
+ ///
+ /// Automatically grows the [`typifier`] to `expr` so calling
+ /// [`typifier_grow`] is not necessary
+ ///
+ /// [`typifier`]: Context::typifier
+ /// [`typifier_grow`]: Self::typifier_grow
+ pub(crate) fn resolve_type<'b>(
+ &'b self,
+ ctx: &'b mut Context,
+ expr: Handle<Expression>,
+ meta: Span,
+ ) -> Result<&'b TypeInner> {
+ self.typifier_grow(ctx, expr, meta)?;
+ Ok(ctx.typifier.get(expr, &self.module.types))
+ }
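+
+ // Typical call pattern (illustrative sketch, `body` being the current
+ // block as in the other frontend methods):
+ //
+ // let expr = ctx.add_expression(Expression::Load { pointer }, meta, body);
+ // let inner = self.resolve_type(ctx, expr, meta)?;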
+
+ /// Gets the type handle for the result of the `expr` expression
+ ///
+ /// Automatically grows the [`typifier`] to `expr` so calling
+ /// [`typifier_grow`] is not necessary
+ ///
+ /// # Note
+ ///
+ /// Consider using [`resolve_type`] whenever possible
+ /// since it doesn't require adding each type to the [`types`] arena
+ /// and it doesn't need to mutably borrow the [`Frontend`][Self]
+ ///
+ /// [`types`]: crate::Module::types
+ /// [`typifier`]: Context::typifier
+ /// [`typifier_grow`]: Self::typifier_grow
+ /// [`resolve_type`]: Self::resolve_type
+ pub(crate) fn resolve_type_handle(
+ &mut self,
+ ctx: &mut Context,
+ expr: Handle<Expression>,
+ meta: Span,
+ ) -> Result<Handle<Type>> {
+ self.typifier_grow(ctx, expr, meta)?;
+ Ok(ctx.typifier.register_type(expr, &mut self.module.types))
+ }
+
+ /// Invalidates the cached type resolution for `expr`, forcing a recomputation
+ pub(crate) fn invalidate_expression<'b>(
+ &'b self,
+ ctx: &'b mut Context,
+ expr: Handle<Expression>,
+ meta: Span,
+ ) -> Result<()> {
+ let resolve_ctx = ResolveContext::with_locals(&self.module, &ctx.locals, &ctx.arguments);
+
+ ctx.typifier
+ .invalidate(expr, &ctx.expressions, &resolve_ctx)
+ .map_err(|error| Error {
+ kind: ErrorKind::SemanticError(format!("Can't resolve type: {error:?}").into()),
+ meta,
+ })
+ }
+
+ pub(crate) fn solve_constant(
+ &mut self,
+ ctx: &Context,
+ root: Handle<Expression>,
+ meta: Span,
+ ) -> Result<Handle<Constant>> {
+ let mut solver = ConstantSolver {
+ types: &mut self.module.types,
+ expressions: &ctx.expressions,
+ constants: &mut self.module.constants,
+ };
+
+ solver.solve(root).map_err(|e| Error {
+ kind: e.into(),
+ meta,
+ })
+ }
+}
diff --git a/third_party/rust/naga/src/front/glsl/variables.rs b/third_party/rust/naga/src/front/glsl/variables.rs
new file mode 100644
index 0000000000..4c0e0a927d
--- /dev/null
+++ b/third_party/rust/naga/src/front/glsl/variables.rs
@@ -0,0 +1,679 @@
+use super::{
+ ast::*,
+ context::{Context, ExprPos},
+ error::{Error, ErrorKind},
+ Frontend, Result, Span,
+};
+use crate::{
+ AddressSpace, Binding, Block, BuiltIn, Constant, Expression, GlobalVariable, Handle,
+ Interpolation, LocalVariable, ResourceBinding, ScalarKind, ShaderStage, SwizzleComponent, Type,
+ TypeInner, VectorSize,
+};
+
+pub struct VarDeclaration<'a, 'key> {
+ pub qualifiers: &'a mut TypeQualifiers<'key>,
+ pub ty: Handle<Type>,
+ pub name: Option<String>,
+ pub init: Option<Handle<Constant>>,
+ pub meta: Span,
+}
+
+/// Information about a builtin used in [`add_builtin`](Frontend::add_builtin).
+struct BuiltInData {
+ /// The type of the builtin.
+ inner: TypeInner,
+ /// The associated builtin class.
+ builtin: BuiltIn,
+ /// Whether the builtin can be written to or not.
+ mutable: bool,
+ /// The storage used for the builtin.
+ storage: StorageQualifier,
+}
+
+pub enum GlobalOrConstant {
+ Global(Handle<GlobalVariable>),
+ Constant(Handle<Constant>),
+}
+
+impl Frontend {
+ /// Adds a builtin and returns a variable reference to it
+ fn add_builtin(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ name: &str,
+ data: BuiltInData,
+ meta: Span,
+ ) -> Option<VariableReference> {
+ let ty = self.module.types.insert(
+ Type {
+ name: None,
+ inner: data.inner,
+ },
+ meta,
+ );
+
+ let handle = self.module.global_variables.append(
+ GlobalVariable {
+ name: Some(name.into()),
+ space: AddressSpace::Private,
+ binding: None,
+ ty,
+ init: None,
+ },
+ meta,
+ );
+
+ let idx = self.entry_args.len();
+ self.entry_args.push(EntryArg {
+ name: None,
+ binding: Binding::BuiltIn(data.builtin),
+ handle,
+ storage: data.storage,
+ });
+
+ self.global_variables.push((
+ name.into(),
+ GlobalLookup {
+ kind: GlobalLookupKind::Variable(handle),
+ entry_arg: Some(idx),
+ mutable: data.mutable,
+ },
+ ));
+
+ let expr = ctx.add_expression(Expression::GlobalVariable(handle), meta, body);
+
+ let var = VariableReference {
+ expr,
+ load: true,
+ mutable: data.mutable,
+ constant: None,
+ entry_arg: Some(idx),
+ };
+
+ ctx.symbol_table.add_root(name.into(), var.clone());
+
+ Some(var)
+ }
+
+ pub(crate) fn lookup_variable(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ name: &str,
+ meta: Span,
+ ) -> Option<VariableReference> {
+ if let Some(var) = ctx.symbol_table.lookup(name).cloned() {
+ return Some(var);
+ }
+
+ let data = match name {
+ "gl_Position" => BuiltInData {
+ inner: TypeInner::Vector {
+ size: VectorSize::Quad,
+ kind: ScalarKind::Float,
+ width: 4,
+ },
+ builtin: BuiltIn::Position { invariant: false },
+ mutable: true,
+ storage: StorageQualifier::Output,
+ },
+ "gl_FragCoord" => BuiltInData {
+ inner: TypeInner::Vector {
+ size: VectorSize::Quad,
+ kind: ScalarKind::Float,
+ width: 4,
+ },
+ builtin: BuiltIn::Position { invariant: false },
+ mutable: false,
+ storage: StorageQualifier::Input,
+ },
+ "gl_PointCoord" => BuiltInData {
+ inner: TypeInner::Vector {
+ size: VectorSize::Bi,
+ kind: ScalarKind::Float,
+ width: 4,
+ },
+ builtin: BuiltIn::PointCoord,
+ mutable: false,
+ storage: StorageQualifier::Input,
+ },
+ "gl_GlobalInvocationID"
+ | "gl_NumWorkGroups"
+ | "gl_WorkGroupSize"
+ | "gl_WorkGroupID"
+ | "gl_LocalInvocationID" => BuiltInData {
+ inner: TypeInner::Vector {
+ size: VectorSize::Tri,
+ kind: ScalarKind::Uint,
+ width: 4,
+ },
+ builtin: match name {
+ "gl_GlobalInvocationID" => BuiltIn::GlobalInvocationId,
+ "gl_NumWorkGroups" => BuiltIn::NumWorkGroups,
+ "gl_WorkGroupSize" => BuiltIn::WorkGroupSize,
+ "gl_WorkGroupID" => BuiltIn::WorkGroupId,
+ "gl_LocalInvocationID" => BuiltIn::LocalInvocationId,
+ _ => unreachable!(),
+ },
+ mutable: false,
+ storage: StorageQualifier::Input,
+ },
+ "gl_FrontFacing" => BuiltInData {
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Bool,
+ width: crate::BOOL_WIDTH,
+ },
+ builtin: BuiltIn::FrontFacing,
+ mutable: false,
+ storage: StorageQualifier::Input,
+ },
+ "gl_PointSize" | "gl_FragDepth" => BuiltInData {
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Float,
+ width: 4,
+ },
+ builtin: match name {
+ "gl_PointSize" => BuiltIn::PointSize,
+ "gl_FragDepth" => BuiltIn::FragDepth,
+ _ => unreachable!(),
+ },
+ mutable: true,
+ storage: StorageQualifier::Output,
+ },
+ "gl_ClipDistance" | "gl_CullDistance" => {
+ let base = self.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Float,
+ width: 4,
+ },
+ },
+ meta,
+ );
+
+ BuiltInData {
+ inner: TypeInner::Array {
+ base,
+ size: crate::ArraySize::Dynamic,
+ stride: 4,
+ },
+ builtin: match name {
+ "gl_ClipDistance" => BuiltIn::ClipDistance,
+ "gl_CullDistance" => BuiltIn::CullDistance,
+ _ => unreachable!(),
+ },
+ mutable: self.meta.stage == ShaderStage::Vertex,
+ storage: StorageQualifier::Output,
+ }
+ }
+ _ => {
+ let builtin = match name {
+ "gl_BaseVertex" => BuiltIn::BaseVertex,
+ "gl_BaseInstance" => BuiltIn::BaseInstance,
+ "gl_PrimitiveID" => BuiltIn::PrimitiveIndex,
+ "gl_InstanceIndex" => BuiltIn::InstanceIndex,
+ "gl_VertexIndex" => BuiltIn::VertexIndex,
+ "gl_SampleID" => BuiltIn::SampleIndex,
+ "gl_LocalInvocationIndex" => BuiltIn::LocalInvocationIndex,
+ _ => return None,
+ };
+
+ BuiltInData {
+ inner: TypeInner::Scalar {
+ kind: ScalarKind::Uint,
+ width: 4,
+ },
+ builtin,
+ mutable: false,
+ storage: StorageQualifier::Input,
+ }
+ }
+ };
+
+ self.add_builtin(ctx, body, name, data, meta)
+ }
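+
+ // For illustration: the first lookup of "gl_Position" misses the symbol
+ // table, so `add_builtin` registers a private vec4 global with a
+ // `BuiltIn::Position` binding and a symbol table root entry; subsequent
+ // lookups find that entry and return the same variable reference.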
+
+ pub(crate) fn make_variable_invariant(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ name: &str,
+ meta: Span,
+ ) {
+ if let Some(var) = self.lookup_variable(ctx, body, name, meta) {
+ if let Some(index) = var.entry_arg {
+ if let Binding::BuiltIn(BuiltIn::Position { ref mut invariant }) =
+ self.entry_args[index].binding
+ {
+ *invariant = true;
+ }
+ }
+ }
+ }
+
+ pub(crate) fn field_selection(
+ &mut self,
+ ctx: &mut Context,
+ pos: ExprPos,
+ body: &mut Block,
+ expression: Handle<Expression>,
+ name: &str,
+ meta: Span,
+ ) -> Result<Handle<Expression>> {
+ let (ty, is_pointer) = match *self.resolve_type(ctx, expression, meta)? {
+ TypeInner::Pointer { base, .. } => (&self.module.types[base].inner, true),
+ ref ty => (ty, false),
+ };
+ match *ty {
+ TypeInner::Struct { ref members, .. } => {
+ let index = members
+ .iter()
+ .position(|m| m.name == Some(name.into()))
+ .ok_or_else(|| Error {
+ kind: ErrorKind::UnknownField(name.into()),
+ meta,
+ })?;
+ let pointer = ctx.add_expression(
+ Expression::AccessIndex {
+ base: expression,
+ index: index as u32,
+ },
+ meta,
+ body,
+ );
+
+ Ok(match pos {
+ ExprPos::Rhs if is_pointer => {
+ ctx.add_expression(Expression::Load { pointer }, meta, body)
+ }
+ _ => pointer,
+ })
+ }
+ // swizzles (xyzw, rgba, stpq)
+ TypeInner::Vector { size, .. } => {
+ let check_swizzle_components = |comps: &str| {
+ name.chars()
+ .map(|c| {
+ comps
+ .find(c)
+ .filter(|i| *i < size as usize)
+ .map(|i| SwizzleComponent::from_index(i as u32))
+ })
+ .collect::<Option<Vec<SwizzleComponent>>>()
+ };
+
+ let components = check_swizzle_components("xyzw")
+ .or_else(|| check_swizzle_components("rgba"))
+ .or_else(|| check_swizzle_components("stpq"));
+
+ if let Some(components) = components {
+ if let ExprPos::Lhs = pos {
+ let not_unique = (1..components.len())
+ .any(|i| components[i..].contains(&components[i - 1]));
+ if not_unique {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ format!(
+ "swizzle cannot have duplicate components in left-hand-side expression for \"{name}\""
+ )
+ .into(),
+ ),
+ meta,
+ })
+ }
+ }
+
+ let mut pattern = [SwizzleComponent::X; 4];
+ for (pat, component) in pattern.iter_mut().zip(&components) {
+ *pat = *component;
+ }
+
+ // flatten nested swizzles (vec.zyx.xy.x => vec.z)
+ let mut expression = expression;
+ if let Expression::Swizzle {
+ size: _,
+ vector,
+ pattern: ref src_pattern,
+ } = ctx[expression]
+ {
+ expression = vector;
+ for pat in &mut pattern {
+ *pat = src_pattern[pat.index() as usize];
+ }
+ }
+
+ let size = match components.len() {
+ // Swizzles with just one component are accesses and not swizzles
+ 1 => {
+ match pos {
+ // If the position is in the right hand side and the base
+ // vector is a pointer, load it, otherwise the swizzle would
+ // produce a pointer
+ ExprPos::Rhs if is_pointer => {
+ expression = ctx.add_expression(
+ Expression::Load {
+ pointer: expression,
+ },
+ meta,
+ body,
+ );
+ }
+ _ => {}
+ };
+ return Ok(ctx.add_expression(
+ Expression::AccessIndex {
+ base: expression,
+ index: pattern[0].index(),
+ },
+ meta,
+ body,
+ ));
+ }
+ 2 => VectorSize::Bi,
+ 3 => VectorSize::Tri,
+ 4 => VectorSize::Quad,
+ _ => {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ format!("Bad swizzle size for \"{name:?}\"").into(),
+ ),
+ meta,
+ });
+
+ VectorSize::Quad
+ }
+ };
+
+ if is_pointer {
+ // NOTE: for lhs expression, this extra load ends up as an unused expr, because the
+ // assignment will extract the pointer and use it directly anyway. Unfortunately we
+ // need it for validation to pass, as swizzles cannot operate on pointer values.
+ expression = ctx.add_expression(
+ Expression::Load {
+ pointer: expression,
+ },
+ meta,
+ body,
+ );
+ }
+
+ Ok(ctx.add_expression(
+ Expression::Swizzle {
+ size,
+ vector: expression,
+ pattern,
+ },
+ meta,
+ body,
+ ))
+ } else {
+ Err(Error {
+ kind: ErrorKind::SemanticError(
+ format!("Invalid swizzle for vector \"{name}\"").into(),
+ ),
+ meta,
+ })
+ }
+ }
+ _ => Err(Error {
+ kind: ErrorKind::SemanticError(
+ format!("Can't lookup field on this type \"{name}\"").into(),
+ ),
+ meta,
+ }),
+ }
+ }
+
+ pub(crate) fn add_global_var(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ VarDeclaration {
+ qualifiers,
+ mut ty,
+ name,
+ init,
+ meta,
+ }: VarDeclaration,
+ ) -> Result<GlobalOrConstant> {
+ let storage = qualifiers.storage.0;
+ let (ret, lookup) = match storage {
+ StorageQualifier::Input | StorageQualifier::Output => {
+ let input = storage == StorageQualifier::Input;
+ // TODO: glslang seems to use a counter for variables without
+ // explicit location (even if that causes collisions)
+ let location = qualifiers
+ .uint_layout_qualifier("location", &mut self.errors)
+ .unwrap_or(0);
+ let interpolation = qualifiers.interpolation.take().map(|(i, _)| i).or_else(|| {
+ let kind = self.module.types[ty].inner.scalar_kind()?;
+ Some(match kind {
+ ScalarKind::Float => Interpolation::Perspective,
+ _ => Interpolation::Flat,
+ })
+ });
+ let sampling = qualifiers.sampling.take().map(|(s, _)| s);
+
+ let handle = self.module.global_variables.append(
+ GlobalVariable {
+ name: name.clone(),
+ space: AddressSpace::Private,
+ binding: None,
+ ty,
+ init,
+ },
+ meta,
+ );
+
+ let idx = self.entry_args.len();
+ self.entry_args.push(EntryArg {
+ name: name.clone(),
+ binding: Binding::Location {
+ location,
+ interpolation,
+ sampling,
+ },
+ handle,
+ storage,
+ });
+
+ let lookup = GlobalLookup {
+ kind: GlobalLookupKind::Variable(handle),
+ entry_arg: Some(idx),
+ mutable: !input,
+ };
+
+ (GlobalOrConstant::Global(handle), lookup)
+ }
+ StorageQualifier::Const => {
+ let init = init.ok_or_else(|| Error {
+ kind: ErrorKind::SemanticError("const values must have an initializer".into()),
+ meta,
+ })?;
+
+ let lookup = GlobalLookup {
+ kind: GlobalLookupKind::Constant(init, ty),
+ entry_arg: None,
+ mutable: false,
+ };
+
+ if let Some(name) = name.as_ref() {
+ let constant = self.module.constants.get_mut(init);
+ if constant.name.is_none() {
+ // set the name of the constant
+ constant.name = Some(name.clone())
+ } else {
+ // add a copy of the constant with the new name
+ let new_const = Constant {
+ name: Some(name.clone()),
+ specialization: constant.specialization,
+ inner: constant.inner.clone(),
+ };
+ self.module.constants.fetch_or_append(new_const, meta);
+ }
+ }
+
+ (GlobalOrConstant::Constant(init), lookup)
+ }
+ StorageQualifier::AddressSpace(mut space) => {
+ match space {
+ AddressSpace::Storage { ref mut access } => {
+ if let Some((allowed_access, _)) = qualifiers.storage_access.take() {
+ *access = allowed_access;
+ }
+ }
+ AddressSpace::Uniform => match self.module.types[ty].inner {
+ TypeInner::Image {
+ class,
+ dim,
+ arrayed,
+ } => {
+ if let crate::ImageClass::Storage {
+ mut access,
+ mut format,
+ } = class
+ {
+ if let Some((allowed_access, _)) = qualifiers.storage_access.take()
+ {
+ access = allowed_access;
+ }
+
+ match qualifiers.layout_qualifiers.remove(&QualifierKey::Format) {
+ Some((QualifierValue::Format(f), _)) => format = f,
+ // TODO: glsl supports images without format qualifier
+ // if they are `writeonly`
+ None => self.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "image types require a format layout qualifier".into(),
+ ),
+ meta,
+ }),
+ _ => unreachable!(),
+ }
+
+ ty = self.module.types.insert(
+ Type {
+ name: None,
+ inner: TypeInner::Image {
+ dim,
+ arrayed,
+ class: crate::ImageClass::Storage { format, access },
+ },
+ },
+ meta,
+ );
+ }
+
+ space = AddressSpace::Handle
+ }
+ TypeInner::Sampler { .. } => space = AddressSpace::Handle,
+ _ => {
+ if qualifiers.none_layout_qualifier("push_constant", &mut self.errors) {
+ space = AddressSpace::PushConstant
+ }
+ }
+ },
+ AddressSpace::Function => space = AddressSpace::Private,
+ _ => {}
+ };
+
+ let binding = match space {
+ AddressSpace::Uniform | AddressSpace::Storage { .. } | AddressSpace::Handle => {
+ let binding = qualifiers.uint_layout_qualifier("binding", &mut self.errors);
+ if binding.is_none() {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError(
+ "uniform/buffer blocks require layout(binding=X)".into(),
+ ),
+ meta,
+ });
+ }
+ let set = qualifiers.uint_layout_qualifier("set", &mut self.errors);
+ binding.map(|binding| ResourceBinding {
+ group: set.unwrap_or(0),
+ binding,
+ })
+ }
+ _ => None,
+ };
+
+ let handle = self.module.global_variables.append(
+ GlobalVariable {
+ name: name.clone(),
+ space,
+ binding,
+ ty,
+ init,
+ },
+ meta,
+ );
+
+ let lookup = GlobalLookup {
+ kind: GlobalLookupKind::Variable(handle),
+ entry_arg: None,
+ mutable: true,
+ };
+
+ (GlobalOrConstant::Global(handle), lookup)
+ }
+ };
+
+ if let Some(name) = name {
+ ctx.add_global(self, &name, lookup, body);
+
+ self.global_variables.push((name, lookup));
+ }
+
+ qualifiers.unused_errors(&mut self.errors);
+
+ Ok(ret)
+ }
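+
+ // For illustration: a declaration such as
+ // `layout(set = 1, binding = 2) uniform texture2D tex;` takes the
+ // `AddressSpace::Uniform` arm above, is moved to `AddressSpace::Handle`
+ // because the type is an image, and gets its `ResourceBinding`
+ // (group 1, binding 2) from the `set`/`binding` layout qualifiers.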
+
+ pub(crate) fn add_local_var(
+ &mut self,
+ ctx: &mut Context,
+ body: &mut Block,
+ decl: VarDeclaration,
+ ) -> Result<Handle<Expression>> {
+ let storage = decl.qualifiers.storage;
+ let mutable = match storage.0 {
+ StorageQualifier::AddressSpace(AddressSpace::Function) => true,
+ StorageQualifier::Const => false,
+ _ => {
+ self.errors.push(Error {
+ kind: ErrorKind::SemanticError("Locals cannot have a storage qualifier".into()),
+ meta: storage.1,
+ });
+ true
+ }
+ };
+
+ let handle = ctx.locals.append(
+ LocalVariable {
+ name: decl.name.clone(),
+ ty: decl.ty,
+ init: None,
+ },
+ decl.meta,
+ );
+ let expr = ctx.add_expression(Expression::LocalVariable(handle), decl.meta, body);
+
+ if let Some(name) = decl.name {
+ let maybe_var = ctx.add_local_var(name.clone(), expr, mutable);
+
+ if maybe_var.is_some() {
+ self.errors.push(Error {
+ kind: ErrorKind::VariableAlreadyDeclared(name),
+ meta: decl.meta,
+ })
+ }
+ }
+
+ decl.qualifiers.unused_errors(&mut self.errors);
+
+ Ok(expr)
+ }
+}